drivers/scsi/lpfc/lpfc_els.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 /* See Fibre Channel protocol T11 FC-LS for details */
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
31 #include "lpfc_hw.h"
32 #include "lpfc_sli.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_scsi.h"
35 #include "lpfc.h"
36 #include "lpfc_logmsg.h"
37 #include "lpfc_crtn.h"
38 #include "lpfc_vport.h"
39 #include "lpfc_debugfs.h"
41 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
42 struct lpfc_iocbq *);
43 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
44 struct lpfc_iocbq *);
45 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
46 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
47 struct lpfc_nodelist *ndlp, uint8_t retry);
48 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
49 struct lpfc_iocbq *iocb);
50 static void lpfc_register_new_vport(struct lpfc_hba *phba,
51 struct lpfc_vport *vport,
52 struct lpfc_nodelist *ndlp);
54 static int lpfc_max_els_tries = 3;
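/* lpfc_els_chk_latt - Check whether a link attention event is pending while
 * discovery is in progress.  Returns 1 (after setting FC_ABORT_DISCOVERY on
 * the vport and issuing CLEAR_LA if needed) when a LATT is pending, else 0.
 */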
56 int
57 lpfc_els_chk_latt(struct lpfc_vport *vport)
59 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
60 struct lpfc_hba *phba = vport->phba;
61 uint32_t ha_copy;
63 if (vport->port_state >= LPFC_VPORT_READY ||
64 phba->link_state == LPFC_LINK_DOWN)
65 return 0;
67 /* Read the HBA Host Attention Register */
68 ha_copy = readl(phba->HAregaddr);
70 if (!(ha_copy & HA_LATT))
71 return 0;
73 /* Pending Link Event during Discovery */
74 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
75 "0237 Pending Link Event during "
76 "Discovery: State x%x\n",
77 phba->pport->port_state);
79 /* CLEAR_LA should re-enable link attention events and
 80      * we should then immediately take a LATT event. The
 81      * LATT processing should call lpfc_linkdown() which
 82      * will clean up any leftover in-progress discovery
83 * events.
85 spin_lock_irq(shost->host_lock);
86 vport->fc_flag |= FC_ABORT_DISCOVERY;
87 spin_unlock_irq(shost->host_lock);
89 if (phba->link_state != LPFC_CLEAR_LA)
90 lpfc_issue_clear_la(phba, vport);
92 return 1;
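/* lpfc_prep_els_iocb - Allocate and prepare an ELS iocb.  Allocates the iocbq
 * plus DMA buffers for the command payload, the optional response payload
 * (when expectRsp is set) and the buffer pointer list, fills in the BDEs and
 * IOCB fields, and takes a reference on ndlp for context1.  Returns the
 * prepared iocb, or NULL on any allocation failure.
 */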
95 static struct lpfc_iocbq *
96 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
97 uint16_t cmdSize, uint8_t retry,
98 struct lpfc_nodelist *ndlp, uint32_t did,
99 uint32_t elscmd)
101 struct lpfc_hba *phba = vport->phba;
102 struct lpfc_iocbq *elsiocb;
103 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
104 struct ulp_bde64 *bpl;
105 IOCB_t *icmd;
108 if (!lpfc_is_link_up(phba))
109 return NULL;
111 /* Allocate buffer for command iocb */
112 elsiocb = lpfc_sli_get_iocbq(phba);
114 if (elsiocb == NULL)
115 return NULL;
117 icmd = &elsiocb->iocb;
119 /* fill in BDEs for command */
120 /* Allocate buffer for command payload */
121 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
122 if (pcmd)
123 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
124 if (!pcmd || !pcmd->virt)
125 goto els_iocb_free_pcmb_exit;
127 INIT_LIST_HEAD(&pcmd->list);
129 /* Allocate buffer for response payload */
130 if (expectRsp) {
131 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
132 if (prsp)
133 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
134 &prsp->phys);
135 if (!prsp || !prsp->virt)
136 goto els_iocb_free_prsp_exit;
137 INIT_LIST_HEAD(&prsp->list);
138 } else
139 prsp = NULL;
141 /* Allocate buffer for Buffer ptr list */
142 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
143 if (pbuflist)
144 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
145 &pbuflist->phys);
146 if (!pbuflist || !pbuflist->virt)
147 goto els_iocb_free_pbuf_exit;
149 INIT_LIST_HEAD(&pbuflist->list);
151 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
152 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
153 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
154 icmd->un.elsreq64.remoteID = did; /* DID */
155 if (expectRsp) {
156 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
157 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
158 icmd->ulpTimeout = phba->fc_ratov * 2;
159 } else {
160 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
161 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
163 icmd->ulpBdeCount = 1;
164 icmd->ulpLe = 1;
165 icmd->ulpClass = CLASS3;
167 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
168 icmd->un.elsreq64.myID = vport->fc_myDID;
170 /* For ELS_REQUEST64_CR, use the VPI by default */
171 icmd->ulpContext = vport->vpi;
172 icmd->ulpCt_h = 0;
173 icmd->ulpCt_l = 1;
176 bpl = (struct ulp_bde64 *) pbuflist->virt;
177 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
178 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
179 bpl->tus.f.bdeSize = cmdSize;
180 bpl->tus.f.bdeFlags = 0;
181 bpl->tus.w = le32_to_cpu(bpl->tus.w);
183 if (expectRsp) {
184 bpl++;
185 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
186 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
187 bpl->tus.f.bdeSize = FCELSSIZE;
188 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
189 bpl->tus.w = le32_to_cpu(bpl->tus.w);
192 /* prevent preparing iocb with NULL ndlp reference */
193 elsiocb->context1 = lpfc_nlp_get(ndlp);
194 if (!elsiocb->context1)
195 goto els_iocb_free_pbuf_exit;
196 elsiocb->context2 = pcmd;
197 elsiocb->context3 = pbuflist;
198 elsiocb->retry = retry;
199 elsiocb->vport = vport;
200 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
202 if (prsp) {
203 list_add(&prsp->list, &pcmd->list);
205 if (expectRsp) {
206 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
207 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
208 "0116 Xmit ELS command x%x to remote "
209 "NPORT x%x I/O tag: x%x, port state: x%x\n",
210 elscmd, did, elsiocb->iotag,
211 vport->port_state);
212 } else {
213 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
214 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
215 "0117 Xmit ELS response x%x to remote "
216 "NPORT x%x I/O tag: x%x, size: x%x\n",
217 elscmd, ndlp->nlp_DID, elsiocb->iotag,
218 cmdSize);
220 return elsiocb;
222 els_iocb_free_pbuf_exit:
223 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
224 kfree(pbuflist);
226 els_iocb_free_prsp_exit:
227 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
228 kfree(prsp);
230 els_iocb_free_pcmb_exit:
231 kfree(pcmd);
232 lpfc_sli_release_iocbq(phba, elsiocb);
233 return NULL;
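/* lpfc_issue_fabric_reglogin - Issue the CONFIG_LINK and fabric REG_LOGIN
 * mailbox commands after a successful fabric FLOGI.  On any failure the
 * vport is set to FC_VPORT_FAILED and -ENXIO is returned.
 */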
236 static int
237 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
239 struct lpfc_hba *phba = vport->phba;
240 LPFC_MBOXQ_t *mbox;
241 struct lpfc_dmabuf *mp;
242 struct lpfc_nodelist *ndlp;
243 struct serv_parm *sp;
244 int rc;
245 int err = 0;
247 sp = &phba->fc_fabparam;
248 ndlp = lpfc_findnode_did(vport, Fabric_DID);
249 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
250 err = 1;
251 goto fail;
254 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
255 if (!mbox) {
256 err = 2;
257 goto fail;
260 vport->port_state = LPFC_FABRIC_CFG_LINK;
261 lpfc_config_link(phba, mbox);
262 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
263 mbox->vport = vport;
265 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
266 if (rc == MBX_NOT_FINISHED) {
267 err = 3;
268 goto fail_free_mbox;
271 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
272 if (!mbox) {
273 err = 4;
274 goto fail;
276 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
278 if (rc) {
279 err = 5;
280 goto fail_free_mbox;
283 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
284 mbox->vport = vport;
285 /* increment the reference count on ndlp to hold reference
286 * for the callback routine.
288 mbox->context2 = lpfc_nlp_get(ndlp);
290 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
291 if (rc == MBX_NOT_FINISHED) {
292 err = 6;
293 goto fail_issue_reg_login;
296 return 0;
298 fail_issue_reg_login:
299 /* decrement the reference count on ndlp just incremented
300 * for the failed mbox command.
302 lpfc_nlp_put(ndlp);
303 mp = (struct lpfc_dmabuf *) mbox->context1;
304 lpfc_mbuf_free(phba, mp->virt, mp->phys);
305 kfree(mp);
306 fail_free_mbox:
307 mempool_free(mbox, phba->mbox_mem_pool);
309 fail:
310 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
311 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
312 "0249 Cannot issue Register Fabric login: Err %d\n", err);
313 return -ENXIO;
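/* lpfc_cmpl_els_flogi_fabric - Handle a FLOGI that completed to a fabric
 * F_Port: record the fabric service parameters and timeout values, handle
 * an NportID change and NPIV support, then register the fabric login (or
 * register the new vport when NPIV is enabled).
 */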
316 static int
317 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
318 struct serv_parm *sp, IOCB_t *irsp)
320 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
321 struct lpfc_hba *phba = vport->phba;
322 struct lpfc_nodelist *np;
323 struct lpfc_nodelist *next_np;
325 spin_lock_irq(shost->host_lock);
326 vport->fc_flag |= FC_FABRIC;
327 spin_unlock_irq(shost->host_lock);
329 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
330 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
331 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
333 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
335 if (phba->fc_topology == TOPOLOGY_LOOP) {
336 spin_lock_irq(shost->host_lock);
337 vport->fc_flag |= FC_PUBLIC_LOOP;
338 spin_unlock_irq(shost->host_lock);
339 } else {
 341                  * If we are an N_Port connected to a Fabric, fix up sparams so
 342                  * logins to devices on remote loops work.
344 vport->fc_sparam.cmn.altBbCredit = 1;
347 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
348 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
349 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
350 ndlp->nlp_class_sup = 0;
351 if (sp->cls1.classValid)
352 ndlp->nlp_class_sup |= FC_COS_CLASS1;
353 if (sp->cls2.classValid)
354 ndlp->nlp_class_sup |= FC_COS_CLASS2;
355 if (sp->cls3.classValid)
356 ndlp->nlp_class_sup |= FC_COS_CLASS3;
357 if (sp->cls4.classValid)
358 ndlp->nlp_class_sup |= FC_COS_CLASS4;
359 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
360 sp->cmn.bbRcvSizeLsb;
361 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
363 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
364 if (sp->cmn.response_multiple_NPort) {
365 lpfc_printf_vlog(vport, KERN_WARNING,
366 LOG_ELS | LOG_VPORT,
367 "1816 FLOGI NPIV supported, "
368 "response data 0x%x\n",
369 sp->cmn.response_multiple_NPort);
370 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
371 } else {
 372                         /* Because we asked f/w for NPIV it still expects us
 373                            to call reg_vnpid at least for the physical host */
374 lpfc_printf_vlog(vport, KERN_WARNING,
375 LOG_ELS | LOG_VPORT,
376 "1817 Fabric does not support NPIV "
377 "- configuring single port mode.\n");
378 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
382 if ((vport->fc_prevDID != vport->fc_myDID) &&
383 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
385 /* If our NportID changed, we need to ensure all
386 * remaining NPORTs get unreg_login'ed.
388 list_for_each_entry_safe(np, next_np,
389 &vport->fc_nodes, nlp_listp) {
390 if (!NLP_CHK_NODE_ACT(ndlp))
391 continue;
392 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
393 !(np->nlp_flag & NLP_NPR_ADISC))
394 continue;
395 spin_lock_irq(shost->host_lock);
396 np->nlp_flag &= ~NLP_NPR_ADISC;
397 spin_unlock_irq(shost->host_lock);
398 lpfc_unreg_rpi(vport, np);
400 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
401 lpfc_mbx_unreg_vpi(vport);
402 spin_lock_irq(shost->host_lock);
403 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
404 spin_unlock_irq(shost->host_lock);
408 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
410 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
411 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
412 lpfc_register_new_vport(phba, vport, ndlp);
413 return 0;
415 lpfc_issue_fabric_reglogin(vport);
416 return 0;
420 * We FLOGIed into an NPort, initiate pt2pt protocol
422 static int
423 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
424 struct serv_parm *sp)
426 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
427 struct lpfc_hba *phba = vport->phba;
428 LPFC_MBOXQ_t *mbox;
429 int rc;
431 spin_lock_irq(shost->host_lock);
432 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
433 spin_unlock_irq(shost->host_lock);
435 phba->fc_edtov = FF_DEF_EDTOV;
436 phba->fc_ratov = FF_DEF_RATOV;
437 rc = memcmp(&vport->fc_portname, &sp->portName,
438 sizeof(vport->fc_portname));
439 if (rc >= 0) {
440 /* This side will initiate the PLOGI */
441 spin_lock_irq(shost->host_lock);
442 vport->fc_flag |= FC_PT2PT_PLOGI;
443 spin_unlock_irq(shost->host_lock);
 446                  * N_Port ID cannot be 0, so set ours to LocalID; the other
 447                  * side will be the RemoteID.
450 /* not equal */
451 if (rc)
452 vport->fc_myDID = PT2PT_LocalID;
454 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
455 if (!mbox)
456 goto fail;
458 lpfc_config_link(phba, mbox);
460 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
461 mbox->vport = vport;
462 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
463 if (rc == MBX_NOT_FINISHED) {
464 mempool_free(mbox, phba->mbox_mem_pool);
465 goto fail;
467 /* Decrement ndlp reference count indicating that ndlp can be
468 * safely released when other references to it are done.
470 lpfc_nlp_put(ndlp);
472 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
473 if (!ndlp) {
475 * Cannot find existing Fabric ndlp, so allocate a
476 * new one
478 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
479 if (!ndlp)
480 goto fail;
481 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
482 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
483 ndlp = lpfc_enable_node(vport, ndlp,
484 NLP_STE_UNUSED_NODE);
485 if(!ndlp)
486 goto fail;
489 memcpy(&ndlp->nlp_portname, &sp->portName,
490 sizeof(struct lpfc_name));
491 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
492 sizeof(struct lpfc_name));
493 /* Set state will put ndlp onto node list if not already done */
494 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
495 spin_lock_irq(shost->host_lock);
496 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
497 spin_unlock_irq(shost->host_lock);
498 } else
499 /* This side will wait for the PLOGI, decrement ndlp reference
500 * count indicating that ndlp can be released when other
501 * references to it are done.
503 lpfc_nlp_put(ndlp);
505 /* If we are pt2pt with another NPort, force NPIV off! */
506 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
508 spin_lock_irq(shost->host_lock);
509 vport->fc_flag |= FC_PT2PT;
510 spin_unlock_irq(shost->host_lock);
512 /* Start discovery - this should just do CLEAR_LA */
513 lpfc_disc_start(vport);
514 return 0;
515 fail:
516 return -ENXIO;
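/* lpfc_cmpl_els_flogi - Completion callback for a FLOGI request.  Retries on
 * error; otherwise hands off to the fabric or point-to-point completion
 * handler depending on the F_Port bit in the common service parameters.
 */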
519 static void
520 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
521 struct lpfc_iocbq *rspiocb)
523 struct lpfc_vport *vport = cmdiocb->vport;
524 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
525 IOCB_t *irsp = &rspiocb->iocb;
526 struct lpfc_nodelist *ndlp = cmdiocb->context1;
527 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
528 struct serv_parm *sp;
529 int rc;
531 /* Check to see if link went down during discovery */
532 if (lpfc_els_chk_latt(vport)) {
533 /* One additional decrement on node reference count to
534 * trigger the release of the node
536 lpfc_nlp_put(ndlp);
537 goto out;
540 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
541 "FLOGI cmpl: status:x%x/x%x state:x%x",
542 irsp->ulpStatus, irsp->un.ulpWord[4],
543 vport->port_state);
545 if (irsp->ulpStatus) {
546 /* Check for retry */
547 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
548 goto out;
550 /* FLOGI failed, so there is no fabric */
551 spin_lock_irq(shost->host_lock);
552 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
553 spin_unlock_irq(shost->host_lock);
555 /* If private loop, then allow max outstanding els to be
556 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
557 * alpa map would take too long otherwise.
559 if (phba->alpa_map[0] == 0) {
560 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
563 /* FLOGI failure */
564 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
565 "0100 FLOGI failure Data: x%x x%x "
566 "x%x\n",
567 irsp->ulpStatus, irsp->un.ulpWord[4],
568 irsp->ulpTimeout);
569 goto flogifail;
573 * The FLogI succeeded. Sync the data for the CPU before
574 * accessing it.
576 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
578 sp = prsp->virt + sizeof(uint32_t);
580 /* FLOGI completes successfully */
581 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
582 "0101 FLOGI completes sucessfully "
583 "Data: x%x x%x x%x x%x\n",
584 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
585 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
587 if (vport->port_state == LPFC_FLOGI) {
589 * If Common Service Parameters indicate Nport
590 * we are point to point, if Fport we are Fabric.
592 if (sp->cmn.fPort)
593 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
594 else
595 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
597 if (!rc)
598 goto out;
601 flogifail:
602 lpfc_nlp_put(ndlp);
604 if (!lpfc_error_lost_link(irsp)) {
605 /* FLOGI failed, so just use loop map to make discovery list */
606 lpfc_disc_list_loopmap(vport);
608 /* Start discovery */
609 lpfc_disc_start(vport);
610 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
611 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
612 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
613 (phba->link_state != LPFC_CLEAR_LA)) {
614 /* If FLOGI failed enable link interrupt. */
615 lpfc_issue_clear_la(phba, vport);
617 out:
618 lpfc_els_free_iocb(phba, cmdiocb);
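/* lpfc_issue_els_flogi - Build and transmit a FLOGI request to ndlp using the
 * vport's service parameters.  Returns 0 on success, 1 if the iocb could not
 * be allocated or issued.
 */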
621 static int
622 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
623 uint8_t retry)
625 struct lpfc_hba *phba = vport->phba;
626 struct serv_parm *sp;
627 IOCB_t *icmd;
628 struct lpfc_iocbq *elsiocb;
629 struct lpfc_sli_ring *pring;
630 uint8_t *pcmd;
631 uint16_t cmdsize;
632 uint32_t tmo;
633 int rc;
635 pring = &phba->sli.ring[LPFC_ELS_RING];
637 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
638 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
639 ndlp->nlp_DID, ELS_CMD_FLOGI);
641 if (!elsiocb)
642 return 1;
644 icmd = &elsiocb->iocb;
645 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
647 /* For FLOGI request, remainder of payload is service parameters */
648 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
649 pcmd += sizeof(uint32_t);
650 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
651 sp = (struct serv_parm *) pcmd;
653 /* Setup CSPs accordingly for Fabric */
654 sp->cmn.e_d_tov = 0;
655 sp->cmn.w2.r_a_tov = 0;
656 sp->cls1.classValid = 0;
657 sp->cls2.seqDelivery = 1;
658 sp->cls3.seqDelivery = 1;
659 if (sp->cmn.fcphLow < FC_PH3)
660 sp->cmn.fcphLow = FC_PH3;
661 if (sp->cmn.fcphHigh < FC_PH3)
662 sp->cmn.fcphHigh = FC_PH3;
664 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
665 sp->cmn.request_multiple_Nport = 1;
667 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
668 icmd->ulpCt_h = 1;
669 icmd->ulpCt_l = 0;
672 if (phba->fc_topology != TOPOLOGY_LOOP) {
673 icmd->un.elsreq64.myID = 0;
674 icmd->un.elsreq64.fl = 1;
677 tmo = phba->fc_ratov;
678 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
679 lpfc_set_disctmo(vport);
680 phba->fc_ratov = tmo;
682 phba->fc_stat.elsXmitFLOGI++;
683 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
685 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
686 "Issue FLOGI: opt:x%x",
687 phba->sli3_options, 0, 0);
689 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
690 if (rc == IOCB_ERROR) {
691 lpfc_els_free_iocb(phba, elsiocb);
692 return 1;
694 return 0;
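/* lpfc_els_abort_flogi - Walk the ELS ring txcmplq and abort any outstanding
 * FLOGI (ELS request to Fabric_DID) iocbs.
 */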
698 lpfc_els_abort_flogi(struct lpfc_hba *phba)
700 struct lpfc_sli_ring *pring;
701 struct lpfc_iocbq *iocb, *next_iocb;
702 struct lpfc_nodelist *ndlp;
703 IOCB_t *icmd;
705 /* Abort outstanding I/O on NPort <nlp_DID> */
706 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
707 "0201 Abort outstanding I/O on NPort x%x\n",
708 Fabric_DID);
710 pring = &phba->sli.ring[LPFC_ELS_RING];
713 * Check the txcmplq for an iocb that matches the nport the driver is
714 * searching for.
716 spin_lock_irq(&phba->hbalock);
717 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
718 icmd = &iocb->iocb;
719 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
720 icmd->un.elsreq64.bdl.ulpIoTag32) {
721 ndlp = (struct lpfc_nodelist *)(iocb->context1);
722 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
723 (ndlp->nlp_DID == Fabric_DID))
724 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
727 spin_unlock_irq(&phba->hbalock);
729 return 0;
733 lpfc_initial_flogi(struct lpfc_vport *vport)
735 struct lpfc_hba *phba = vport->phba;
736 struct lpfc_nodelist *ndlp;
738 vport->port_state = LPFC_FLOGI;
739 lpfc_set_disctmo(vport);
741 /* First look for the Fabric ndlp */
742 ndlp = lpfc_findnode_did(vport, Fabric_DID);
743 if (!ndlp) {
744 /* Cannot find existing Fabric ndlp, so allocate a new one */
745 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
746 if (!ndlp)
747 return 0;
748 lpfc_nlp_init(vport, ndlp, Fabric_DID);
749 /* Put ndlp onto node list */
750 lpfc_enqueue_node(vport, ndlp);
751 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
752 /* re-setup ndlp without removing from node list */
753 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
754 if (!ndlp)
755 return 0;
758 if (lpfc_issue_els_flogi(vport, ndlp, 0))
759 /* This decrement of reference count to node shall kick off
760 * the release of the node.
762 lpfc_nlp_put(ndlp);
764 return 1;
768 lpfc_initial_fdisc(struct lpfc_vport *vport)
770 struct lpfc_hba *phba = vport->phba;
771 struct lpfc_nodelist *ndlp;
773 /* First look for the Fabric ndlp */
774 ndlp = lpfc_findnode_did(vport, Fabric_DID);
775 if (!ndlp) {
776 /* Cannot find existing Fabric ndlp, so allocate a new one */
777 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
778 if (!ndlp)
779 return 0;
780 lpfc_nlp_init(vport, ndlp, Fabric_DID);
781 /* Put ndlp onto node list */
782 lpfc_enqueue_node(vport, ndlp);
783 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
784 /* re-setup ndlp without removing from node list */
785 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
786 if (!ndlp)
787 return 0;
790 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
791 /* decrement node reference count to trigger the release of
792 * the node.
794 lpfc_nlp_put(ndlp);
795 return 0;
797 return 1;
800 void
801 lpfc_more_plogi(struct lpfc_vport *vport)
803 int sentplogi;
805 if (vport->num_disc_nodes)
806 vport->num_disc_nodes--;
808 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
809 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
810 "0232 Continue discovery with %d PLOGIs to go "
811 "Data: x%x x%x x%x\n",
812 vport->num_disc_nodes, vport->fc_plogi_cnt,
813 vport->fc_flag, vport->port_state);
814 /* Check to see if there are more PLOGIs to be sent */
815 if (vport->fc_flag & FC_NLP_MORE)
816 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
817 sentplogi = lpfc_els_disc_plogi(vport);
819 return;
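/* lpfc_plogi_confirm_nport - After a PLOGI completes, confirm that the node
 * we logged into matches the WWPN recorded for ndlp.  If another ndlp already
 * owns that WWPN, transfer the DID, rport and discovery state to it and
 * retire or recycle the old node.  Returns the ndlp to use from now on.
 */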
822 static struct lpfc_nodelist *
823 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
824 struct lpfc_nodelist *ndlp)
826 struct lpfc_vport *vport = ndlp->vport;
827 struct lpfc_nodelist *new_ndlp;
828 struct lpfc_rport_data *rdata;
829 struct fc_rport *rport;
830 struct serv_parm *sp;
831 uint8_t name[sizeof(struct lpfc_name)];
832 uint32_t rc, keepDID = 0;
834 /* Fabric nodes can have the same WWPN so we don't bother searching
835 * by WWPN. Just return the ndlp that was given to us.
837 if (ndlp->nlp_type & NLP_FABRIC)
838 return ndlp;
840 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
841 memset(name, 0, sizeof(struct lpfc_name));
843 /* Now we find out if the NPort we are logging into, matches the WWPN
844 * we have for that ndlp. If not, we have some work to do.
846 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
848 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
849 return ndlp;
851 if (!new_ndlp) {
852 rc = memcmp(&ndlp->nlp_portname, name,
853 sizeof(struct lpfc_name));
854 if (!rc)
855 return ndlp;
856 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
857 if (!new_ndlp)
858 return ndlp;
859 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
860 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
861 rc = memcmp(&ndlp->nlp_portname, name,
862 sizeof(struct lpfc_name));
863 if (!rc)
864 return ndlp;
865 new_ndlp = lpfc_enable_node(vport, new_ndlp,
866 NLP_STE_UNUSED_NODE);
867 if (!new_ndlp)
868 return ndlp;
869 keepDID = new_ndlp->nlp_DID;
870 } else
871 keepDID = new_ndlp->nlp_DID;
873 lpfc_unreg_rpi(vport, new_ndlp);
874 new_ndlp->nlp_DID = ndlp->nlp_DID;
875 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
877 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
878 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
879 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
881 /* Set state will put new_ndlp on to node list if not already done */
882 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
884 /* Move this back to NPR state */
885 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
886 /* The new_ndlp is replacing ndlp totally, so we need
887 * to put ndlp on UNUSED list and try to free it.
890 /* Fix up the rport accordingly */
891 rport = ndlp->rport;
892 if (rport) {
893 rdata = rport->dd_data;
894 if (rdata->pnode == ndlp) {
895 lpfc_nlp_put(ndlp);
896 ndlp->rport = NULL;
897 rdata->pnode = lpfc_nlp_get(new_ndlp);
898 new_ndlp->rport = rport;
900 new_ndlp->nlp_type = ndlp->nlp_type;
 902                 /* We shall actually free the ndlp with both nlp_DID and
 903                  * nlp_portname fields equal to 0, to avoid leaving an
 904                  * unusable ndlp on the nodelist.
906 if (ndlp->nlp_DID == 0) {
907 spin_lock_irq(&phba->ndlp_lock);
908 NLP_SET_FREE_REQ(ndlp);
909 spin_unlock_irq(&phba->ndlp_lock);
912 /* Two ndlps cannot have the same did on the nodelist */
913 ndlp->nlp_DID = keepDID;
914 lpfc_drop_node(vport, ndlp);
916 else {
917 lpfc_unreg_rpi(vport, ndlp);
918 /* Two ndlps cannot have the same did */
919 ndlp->nlp_DID = keepDID;
920 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
922 return new_ndlp;
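/* lpfc_end_rscn - Finish RSCN processing: if more RSCNs arrived meanwhile,
 * handle them; otherwise clear FC_RSCN_MODE on the vport.
 */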
925 void
926 lpfc_end_rscn(struct lpfc_vport *vport)
928 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
930 if (vport->fc_flag & FC_RSCN_MODE) {
932 * Check to see if more RSCNs came in while we were
933 * processing this one.
935 if (vport->fc_rscn_id_cnt ||
936 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
937 lpfc_els_handle_rscn(vport);
938 else {
939 spin_lock_irq(shost->host_lock);
940 vport->fc_flag &= ~FC_RSCN_MODE;
941 spin_unlock_irq(shost->host_lock);
946 static void
947 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
948 struct lpfc_iocbq *rspiocb)
950 struct lpfc_vport *vport = cmdiocb->vport;
951 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
952 IOCB_t *irsp;
953 struct lpfc_nodelist *ndlp;
954 struct lpfc_dmabuf *prsp;
955 int disc, rc, did, type;
957 /* we pass cmdiocb to state machine which needs rspiocb as well */
958 cmdiocb->context_un.rsp_iocb = rspiocb;
960 irsp = &rspiocb->iocb;
961 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
962 "PLOGI cmpl: status:x%x/x%x did:x%x",
963 irsp->ulpStatus, irsp->un.ulpWord[4],
964 irsp->un.elsreq64.remoteID);
966 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
967 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
968 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
969 "0136 PLOGI completes to NPort x%x "
970 "with no ndlp. Data: x%x x%x x%x\n",
971 irsp->un.elsreq64.remoteID,
972 irsp->ulpStatus, irsp->un.ulpWord[4],
973 irsp->ulpIoTag);
974 goto out;
977 /* Since ndlp can be freed in the disc state machine, note if this node
978 * is being used during discovery.
980 spin_lock_irq(shost->host_lock);
981 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
982 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
983 spin_unlock_irq(shost->host_lock);
984 rc = 0;
986 /* PLOGI completes to NPort <nlp_DID> */
987 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
988 "0102 PLOGI completes to NPort x%x "
989 "Data: x%x x%x x%x x%x x%x\n",
990 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
991 irsp->ulpTimeout, disc, vport->num_disc_nodes);
992 /* Check to see if link went down during discovery */
993 if (lpfc_els_chk_latt(vport)) {
994 spin_lock_irq(shost->host_lock);
995 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
996 spin_unlock_irq(shost->host_lock);
997 goto out;
1000 /* ndlp could be freed in DSM, save these values now */
1001 type = ndlp->nlp_type;
1002 did = ndlp->nlp_DID;
1004 if (irsp->ulpStatus) {
1005 /* Check for retry */
1006 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1007 /* ELS command is being retried */
1008 if (disc) {
1009 spin_lock_irq(shost->host_lock);
1010 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1011 spin_unlock_irq(shost->host_lock);
1013 goto out;
1015 /* PLOGI failed */
1016 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1017 if (lpfc_error_lost_link(irsp))
1018 rc = NLP_STE_FREED_NODE;
1019 else
1020 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1021 NLP_EVT_CMPL_PLOGI);
1022 } else {
1023 /* Good status, call state machine */
1024 prsp = list_entry(((struct lpfc_dmabuf *)
1025 cmdiocb->context2)->list.next,
1026 struct lpfc_dmabuf, list);
1027 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1028 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1029 NLP_EVT_CMPL_PLOGI);
1032 if (disc && vport->num_disc_nodes) {
1033 /* Check to see if there are more PLOGIs to be sent */
1034 lpfc_more_plogi(vport);
1036 if (vport->num_disc_nodes == 0) {
1037 spin_lock_irq(shost->host_lock);
1038 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1039 spin_unlock_irq(shost->host_lock);
1041 lpfc_can_disctmo(vport);
1042 lpfc_end_rscn(vport);
1046 out:
1047 lpfc_els_free_iocb(phba, cmdiocb);
1048 return;
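/* lpfc_issue_els_plogi - Build and transmit a PLOGI to did using the vport's
 * service parameters.  Returns 0 on success, 1 on failure.
 */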
1052 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1054 struct lpfc_hba *phba = vport->phba;
1055 struct serv_parm *sp;
1056 IOCB_t *icmd;
1057 struct lpfc_nodelist *ndlp;
1058 struct lpfc_iocbq *elsiocb;
1059 struct lpfc_sli_ring *pring;
1060 struct lpfc_sli *psli;
1061 uint8_t *pcmd;
1062 uint16_t cmdsize;
1063 int ret;
1065 psli = &phba->sli;
1066 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1068 ndlp = lpfc_findnode_did(vport, did);
1069 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1070 ndlp = NULL;
1072 /* If ndlp is not NULL, we will bump the reference count on it */
1073 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1074 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1075 ELS_CMD_PLOGI);
1076 if (!elsiocb)
1077 return 1;
1079 icmd = &elsiocb->iocb;
1080 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1082 /* For PLOGI request, remainder of payload is service parameters */
1083 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1084 pcmd += sizeof(uint32_t);
1085 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1086 sp = (struct serv_parm *) pcmd;
1088 if (sp->cmn.fcphLow < FC_PH_4_3)
1089 sp->cmn.fcphLow = FC_PH_4_3;
1091 if (sp->cmn.fcphHigh < FC_PH3)
1092 sp->cmn.fcphHigh = FC_PH3;
1094 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1095 "Issue PLOGI: did:x%x",
1096 did, 0, 0);
1098 phba->fc_stat.elsXmitPLOGI++;
1099 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1100 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1102 if (ret == IOCB_ERROR) {
1103 lpfc_els_free_iocb(phba, elsiocb);
1104 return 1;
1106 return 0;
1109 static void
1110 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1111 struct lpfc_iocbq *rspiocb)
1113 struct lpfc_vport *vport = cmdiocb->vport;
1114 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1115 IOCB_t *irsp;
1116 struct lpfc_sli *psli;
1117 struct lpfc_nodelist *ndlp;
1119 psli = &phba->sli;
1120 /* we pass cmdiocb to state machine which needs rspiocb as well */
1121 cmdiocb->context_un.rsp_iocb = rspiocb;
1123 irsp = &(rspiocb->iocb);
1124 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1125 spin_lock_irq(shost->host_lock);
1126 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1127 spin_unlock_irq(shost->host_lock);
1129 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1130 "PRLI cmpl: status:x%x/x%x did:x%x",
1131 irsp->ulpStatus, irsp->un.ulpWord[4],
1132 ndlp->nlp_DID);
1133 /* PRLI completes to NPort <nlp_DID> */
1134 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1135 "0103 PRLI completes to NPort x%x "
1136 "Data: x%x x%x x%x x%x\n",
1137 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1138 irsp->ulpTimeout, vport->num_disc_nodes);
1140 vport->fc_prli_sent--;
1141 /* Check to see if link went down during discovery */
1142 if (lpfc_els_chk_latt(vport))
1143 goto out;
1145 if (irsp->ulpStatus) {
1146 /* Check for retry */
1147 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1148 /* ELS command is being retried */
1149 goto out;
1151 /* PRLI failed */
1152 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1153 if (lpfc_error_lost_link(irsp))
1154 goto out;
1155 else
1156 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1157 NLP_EVT_CMPL_PRLI);
1158 } else
1159 /* Good status, call state machine */
1160 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1161 NLP_EVT_CMPL_PRLI);
1162 out:
1163 lpfc_els_free_iocb(phba, cmdiocb);
1164 return;
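/* lpfc_issue_els_prli - Build and transmit a PRLI to ndlp, advertising FCP
 * initiator function and, on newer firmware, the FC-TAPE bits.  Returns 0 on
 * success, 1 on failure.
 */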
1168 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1169 uint8_t retry)
1171 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1172 struct lpfc_hba *phba = vport->phba;
1173 PRLI *npr;
1174 IOCB_t *icmd;
1175 struct lpfc_iocbq *elsiocb;
1176 struct lpfc_sli_ring *pring;
1177 struct lpfc_sli *psli;
1178 uint8_t *pcmd;
1179 uint16_t cmdsize;
1181 psli = &phba->sli;
1182 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1184 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1185 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1186 ndlp->nlp_DID, ELS_CMD_PRLI);
1187 if (!elsiocb)
1188 return 1;
1190 icmd = &elsiocb->iocb;
1191 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1193 /* For PRLI request, remainder of payload is service parameters */
1194 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
1195 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
1196 pcmd += sizeof(uint32_t);
1198 /* For PRLI, remainder of payload is PRLI parameter page */
1199 npr = (PRLI *) pcmd;
1201 * If our firmware version is 3.20 or later,
1202 * set the following bits for FC-TAPE support.
1204 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1205 npr->ConfmComplAllowed = 1;
1206 npr->Retry = 1;
1207 npr->TaskRetryIdReq = 1;
1209 npr->estabImagePair = 1;
1210 npr->readXferRdyDis = 1;
1212 /* For FCP support */
1213 npr->prliType = PRLI_FCP_TYPE;
1214 npr->initiatorFunc = 1;
1216 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1217 "Issue PRLI: did:x%x",
1218 ndlp->nlp_DID, 0, 0);
1220 phba->fc_stat.elsXmitPRLI++;
1221 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
1222 spin_lock_irq(shost->host_lock);
1223 ndlp->nlp_flag |= NLP_PRLI_SND;
1224 spin_unlock_irq(shost->host_lock);
1225 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1226 spin_lock_irq(shost->host_lock);
1227 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1228 spin_unlock_irq(shost->host_lock);
1229 lpfc_els_free_iocb(phba, elsiocb);
1230 return 1;
1232 vport->fc_prli_sent++;
1233 return 0;
1236 void
1237 lpfc_more_adisc(struct lpfc_vport *vport)
1239 int sentadisc;
1241 if (vport->num_disc_nodes)
1242 vport->num_disc_nodes--;
1243 /* Continue discovery with <num_disc_nodes> ADISCs to go */
1244 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1245 "0210 Continue discovery with %d ADISCs to go "
1246 "Data: x%x x%x x%x\n",
1247 vport->num_disc_nodes, vport->fc_adisc_cnt,
1248 vport->fc_flag, vport->port_state);
1249 /* Check to see if there are more ADISCs to be sent */
1250 if (vport->fc_flag & FC_NLP_MORE) {
1251 lpfc_set_disctmo(vport);
1252 /* go thru NPR nodes and issue any remaining ELS ADISCs */
1253 sentadisc = lpfc_els_disc_adisc(vport);
1255 return;
1258 static void
1259 lpfc_rscn_disc(struct lpfc_vport *vport)
1261 lpfc_can_disctmo(vport);
1263 /* RSCN discovery */
1264 /* go thru NPR nodes and issue ELS PLOGIs */
1265 if (vport->fc_npr_cnt)
1266 if (lpfc_els_disc_plogi(vport))
1267 return;
1269 lpfc_end_rscn(vport);
1272 static void
1273 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1274 struct lpfc_iocbq *rspiocb)
1276 struct lpfc_vport *vport = cmdiocb->vport;
1277 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1278 IOCB_t *irsp;
1279 struct lpfc_nodelist *ndlp;
1280 int disc;
1282 /* we pass cmdiocb to state machine which needs rspiocb as well */
1283 cmdiocb->context_un.rsp_iocb = rspiocb;
1285 irsp = &(rspiocb->iocb);
1286 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1288 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1289 "ADISC cmpl: status:x%x/x%x did:x%x",
1290 irsp->ulpStatus, irsp->un.ulpWord[4],
1291 ndlp->nlp_DID);
1293 /* Since ndlp can be freed in the disc state machine, note if this node
1294 * is being used during discovery.
1296 spin_lock_irq(shost->host_lock);
1297 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1298 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
1299 spin_unlock_irq(shost->host_lock);
1300 /* ADISC completes to NPort <nlp_DID> */
1301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1302 "0104 ADISC completes to NPort x%x "
1303 "Data: x%x x%x x%x x%x x%x\n",
1304 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1305 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1306 /* Check to see if link went down during discovery */
1307 if (lpfc_els_chk_latt(vport)) {
1308 spin_lock_irq(shost->host_lock);
1309 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1310 spin_unlock_irq(shost->host_lock);
1311 goto out;
1314 if (irsp->ulpStatus) {
1315 /* Check for retry */
1316 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1317 /* ELS command is being retried */
1318 if (disc) {
1319 spin_lock_irq(shost->host_lock);
1320 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1321 spin_unlock_irq(shost->host_lock);
1322 lpfc_set_disctmo(vport);
1324 goto out;
1326 /* ADISC failed */
1327 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1328 if (!lpfc_error_lost_link(irsp))
1329 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1330 NLP_EVT_CMPL_ADISC);
1331 } else
1332 /* Good status, call state machine */
1333 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1334 NLP_EVT_CMPL_ADISC);
1336 if (disc && vport->num_disc_nodes) {
1337 /* Check to see if there are more ADISCs to be sent */
1338 lpfc_more_adisc(vport);
1340 /* Check to see if we are done with ADISC authentication */
1341 if (vport->num_disc_nodes == 0) {
1342 /* If we get here, there is nothing left to ADISC */
1344 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1345 * and continue discovery.
1347 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1348 !(vport->fc_flag & FC_RSCN_MODE)) {
1349 lpfc_issue_reg_vpi(phba, vport);
1350 goto out;
1353 * For SLI2, we need to set port_state to READY
1354 * and continue discovery.
1356 if (vport->port_state < LPFC_VPORT_READY) {
1357 /* If we get here, there is nothing to ADISC */
1358 if (vport->port_type == LPFC_PHYSICAL_PORT)
1359 lpfc_issue_clear_la(phba, vport);
1361 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1362 vport->num_disc_nodes = 0;
1363 /* go thru NPR list, issue ELS PLOGIs */
1364 if (vport->fc_npr_cnt)
1365 lpfc_els_disc_plogi(vport);
1367 if (!vport->num_disc_nodes) {
1368 spin_lock_irq(shost->host_lock);
1369 vport->fc_flag &=
1370 ~FC_NDISC_ACTIVE;
1371 spin_unlock_irq(
1372 shost->host_lock);
1373 lpfc_can_disctmo(vport);
1376 vport->port_state = LPFC_VPORT_READY;
1377 } else {
1378 lpfc_rscn_disc(vport);
1382 out:
1383 lpfc_els_free_iocb(phba, cmdiocb);
1384 return;
1388 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1389 uint8_t retry)
1391 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1392 struct lpfc_hba *phba = vport->phba;
1393 ADISC *ap;
1394 IOCB_t *icmd;
1395 struct lpfc_iocbq *elsiocb;
1396 struct lpfc_sli *psli = &phba->sli;
1397 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1398 uint8_t *pcmd;
1399 uint16_t cmdsize;
1401 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
1402 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1403 ndlp->nlp_DID, ELS_CMD_ADISC);
1404 if (!elsiocb)
1405 return 1;
1407 icmd = &elsiocb->iocb;
1408 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1410 /* For ADISC request, remainder of payload is service parameters */
1411 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1412 pcmd += sizeof(uint32_t);
1414 /* Fill in ADISC payload */
1415 ap = (ADISC *) pcmd;
1416 ap->hardAL_PA = phba->fc_pref_ALPA;
1417 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
1418 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1419 ap->DID = be32_to_cpu(vport->fc_myDID);
1421 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1422 "Issue ADISC: did:x%x",
1423 ndlp->nlp_DID, 0, 0);
1425 phba->fc_stat.elsXmitADISC++;
1426 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
1427 spin_lock_irq(shost->host_lock);
1428 ndlp->nlp_flag |= NLP_ADISC_SND;
1429 spin_unlock_irq(shost->host_lock);
1430 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1431 spin_lock_irq(shost->host_lock);
1432 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1433 spin_unlock_irq(shost->host_lock);
1434 lpfc_els_free_iocb(phba, elsiocb);
1435 return 1;
1437 return 0;
1440 static void
1441 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1442 struct lpfc_iocbq *rspiocb)
1444 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1445 struct lpfc_vport *vport = ndlp->vport;
1446 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1447 IOCB_t *irsp;
1448 struct lpfc_sli *psli;
1450 psli = &phba->sli;
1451 /* we pass cmdiocb to state machine which needs rspiocb as well */
1452 cmdiocb->context_un.rsp_iocb = rspiocb;
1454 irsp = &(rspiocb->iocb);
1455 spin_lock_irq(shost->host_lock);
1456 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1457 spin_unlock_irq(shost->host_lock);
1459 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1460 "LOGO cmpl: status:x%x/x%x did:x%x",
1461 irsp->ulpStatus, irsp->un.ulpWord[4],
1462 ndlp->nlp_DID);
1463 /* LOGO completes to NPort <nlp_DID> */
1464 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1465 "0105 LOGO completes to NPort x%x "
1466 "Data: x%x x%x x%x x%x\n",
1467 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1468 irsp->ulpTimeout, vport->num_disc_nodes);
1469 /* Check to see if link went down during discovery */
1470 if (lpfc_els_chk_latt(vport))
1471 goto out;
1473 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
1474 /* NLP_EVT_DEVICE_RM should unregister the RPI
1475 * which should abort all outstanding IOs.
1477 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1478 NLP_EVT_DEVICE_RM);
1479 goto out;
1482 if (irsp->ulpStatus) {
1483 /* Check for retry */
1484 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1485 /* ELS command is being retried */
1486 goto out;
1487 /* LOGO failed */
1488 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1489 if (lpfc_error_lost_link(irsp))
1490 goto out;
1491 else
1492 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1493 NLP_EVT_CMPL_LOGO);
1494 } else
1495 /* Good status, call state machine.
1496 * This will unregister the rpi if needed.
1498 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1499 NLP_EVT_CMPL_LOGO);
1500 out:
1501 lpfc_els_free_iocb(phba, cmdiocb);
1502 return;
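/* lpfc_issue_els_logo - Build and transmit a LOGO to ndlp unless one is
 * already outstanding (NLP_LOGO_SND).  Returns 0 on success, 1 on failure.
 */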
1506 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1507 uint8_t retry)
1509 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1510 struct lpfc_hba *phba = vport->phba;
1511 IOCB_t *icmd;
1512 struct lpfc_iocbq *elsiocb;
1513 struct lpfc_sli_ring *pring;
1514 struct lpfc_sli *psli;
1515 uint8_t *pcmd;
1516 uint16_t cmdsize;
1517 int rc;
1519 psli = &phba->sli;
1520 pring = &psli->ring[LPFC_ELS_RING];
1522 spin_lock_irq(shost->host_lock);
1523 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1524 spin_unlock_irq(shost->host_lock);
1525 return 0;
1527 spin_unlock_irq(shost->host_lock);
1529 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
1530 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1531 ndlp->nlp_DID, ELS_CMD_LOGO);
1532 if (!elsiocb)
1533 return 1;
1535 icmd = &elsiocb->iocb;
1536 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1537 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
1538 pcmd += sizeof(uint32_t);
1540 /* Fill in LOGO payload */
1541 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
1542 pcmd += sizeof(uint32_t);
1543 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
1545 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1546 "Issue LOGO: did:x%x",
1547 ndlp->nlp_DID, 0, 0);
1549 phba->fc_stat.elsXmitLOGO++;
1550 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
1551 spin_lock_irq(shost->host_lock);
1552 ndlp->nlp_flag |= NLP_LOGO_SND;
1553 spin_unlock_irq(shost->host_lock);
1554 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1556 if (rc == IOCB_ERROR) {
1557 spin_lock_irq(shost->host_lock);
1558 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1559 spin_unlock_irq(shost->host_lock);
1560 lpfc_els_free_iocb(phba, elsiocb);
1561 return 1;
1563 return 0;
1566 static void
1567 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1568 struct lpfc_iocbq *rspiocb)
1570 struct lpfc_vport *vport = cmdiocb->vport;
1571 IOCB_t *irsp;
1573 irsp = &rspiocb->iocb;
1575 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1576 "ELS cmd cmpl: status:x%x/x%x did:x%x",
1577 irsp->ulpStatus, irsp->un.ulpWord[4],
1578 irsp->un.elsreq64.remoteID);
1579 /* ELS cmd tag <ulpIoTag> completes */
1580 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1581 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
1582 irsp->ulpIoTag, irsp->ulpStatus,
1583 irsp->un.ulpWord[4], irsp->ulpTimeout);
1584 /* Check to see if link went down during discovery */
1585 lpfc_els_chk_latt(vport);
1586 lpfc_els_free_iocb(phba, cmdiocb);
1587 return;
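/* lpfc_issue_els_scr - Issue a State Change Registration (SCR) request to
 * the node at nportid, allocating a temporary ndlp if one does not exist.
 * Returns 0 on success, 1 on failure.
 */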
1591 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1593 struct lpfc_hba *phba = vport->phba;
1594 IOCB_t *icmd;
1595 struct lpfc_iocbq *elsiocb;
1596 struct lpfc_sli_ring *pring;
1597 struct lpfc_sli *psli;
1598 uint8_t *pcmd;
1599 uint16_t cmdsize;
1600 struct lpfc_nodelist *ndlp;
1602 psli = &phba->sli;
1603 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1604 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
1606 ndlp = lpfc_findnode_did(vport, nportid);
1607 if (!ndlp) {
1608 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1609 if (!ndlp)
1610 return 1;
1611 lpfc_nlp_init(vport, ndlp, nportid);
1612 lpfc_enqueue_node(vport, ndlp);
1613 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1614 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1615 if (!ndlp)
1616 return 1;
1619 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1620 ndlp->nlp_DID, ELS_CMD_SCR);
1622 if (!elsiocb) {
1623 /* This will trigger the release of the node just
1624 * allocated
1626 lpfc_nlp_put(ndlp);
1627 return 1;
1630 icmd = &elsiocb->iocb;
1631 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1633 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
1634 pcmd += sizeof(uint32_t);
1636 /* For SCR, remainder of payload is SCR parameter page */
1637 memset(pcmd, 0, sizeof(SCR));
1638 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
1640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1641 "Issue SCR: did:x%x",
1642 ndlp->nlp_DID, 0, 0);
1644 phba->fc_stat.elsXmitSCR++;
1645 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1646 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1647 /* The additional lpfc_nlp_put will cause the following
 1648                  * lpfc_els_free_iocb routine to trigger the release of
1649 * the node.
1651 lpfc_nlp_put(ndlp);
1652 lpfc_els_free_iocb(phba, elsiocb);
1653 return 1;
1655 /* This will cause the callback-function lpfc_cmpl_els_cmd to
1656 * trigger the release of node.
1658 lpfc_nlp_put(ndlp);
1659 return 0;
1662 static int
1663 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1665 struct lpfc_hba *phba = vport->phba;
1666 IOCB_t *icmd;
1667 struct lpfc_iocbq *elsiocb;
1668 struct lpfc_sli_ring *pring;
1669 struct lpfc_sli *psli;
1670 FARP *fp;
1671 uint8_t *pcmd;
1672 uint32_t *lp;
1673 uint16_t cmdsize;
1674 struct lpfc_nodelist *ondlp;
1675 struct lpfc_nodelist *ndlp;
1677 psli = &phba->sli;
1678 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1679 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
1681 ndlp = lpfc_findnode_did(vport, nportid);
1682 if (!ndlp) {
1683 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1684 if (!ndlp)
1685 return 1;
1686 lpfc_nlp_init(vport, ndlp, nportid);
1687 lpfc_enqueue_node(vport, ndlp);
1688 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1689 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1690 if (!ndlp)
1691 return 1;
1694 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1695 ndlp->nlp_DID, ELS_CMD_RNID);
1696 if (!elsiocb) {
1697 /* This will trigger the release of the node just
1698 * allocated
1700 lpfc_nlp_put(ndlp);
1701 return 1;
1704 icmd = &elsiocb->iocb;
1705 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1707 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
1708 pcmd += sizeof(uint32_t);
1710 /* Fill in FARPR payload */
1711 fp = (FARP *) (pcmd);
1712 memset(fp, 0, sizeof(FARP));
1713 lp = (uint32_t *) pcmd;
1714 *lp++ = be32_to_cpu(nportid);
1715 *lp++ = be32_to_cpu(vport->fc_myDID);
1716 fp->Rflags = 0;
1717 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
1719 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
1720 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1721 ondlp = lpfc_findnode_did(vport, nportid);
1722 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
1723 memcpy(&fp->OportName, &ondlp->nlp_portname,
1724 sizeof(struct lpfc_name));
1725 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
1726 sizeof(struct lpfc_name));
1729 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1730 "Issue FARPR: did:x%x",
1731 ndlp->nlp_DID, 0, 0);
1733 phba->fc_stat.elsXmitFARPR++;
1734 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1735 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1736 /* The additional lpfc_nlp_put will cause the following
1737 * lpfc_els_free_iocb routine to trigger the release of
1738 * the node.
1740 lpfc_nlp_put(ndlp);
1741 lpfc_els_free_iocb(phba, elsiocb);
1742 return 1;
1744 /* This will cause the callback-function lpfc_cmpl_els_cmd to
1745 * trigger the release of the node.
1747 lpfc_nlp_put(ndlp);
1748 return 0;
1751 void
1752 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1754 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1755 struct lpfc_work_evt *evtp;
1757 spin_lock_irq(shost->host_lock);
1758 nlp->nlp_flag &= ~NLP_DELAY_TMO;
1759 spin_unlock_irq(shost->host_lock);
1760 del_timer_sync(&nlp->nlp_delayfunc);
1761 nlp->nlp_last_elscmd = 0;
1763 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
1764 list_del_init(&nlp->els_retry_evt.evt_listp);
1765 /* Decrement nlp reference count held for the delayed retry */
1766 evtp = &nlp->els_retry_evt;
1767 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
1770 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
1771 spin_lock_irq(shost->host_lock);
1772 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1773 spin_unlock_irq(shost->host_lock);
1774 if (vport->num_disc_nodes) {
1775 /* Check to see if there are more
1776 * PLOGIs to be sent
1778 lpfc_more_plogi(vport);
1780 if (vport->num_disc_nodes == 0) {
1781 spin_lock_irq(shost->host_lock);
1782 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1783 spin_unlock_irq(shost->host_lock);
1784 lpfc_can_disctmo(vport);
1785 lpfc_end_rscn(vport);
1789 return;
1792 void
1793 lpfc_els_retry_delay(unsigned long ptr)
1795 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
1796 struct lpfc_vport *vport = ndlp->vport;
1797 struct lpfc_hba *phba = vport->phba;
1798 unsigned long flags;
1799 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
1801 ndlp = (struct lpfc_nodelist *) ptr;
1802 phba = ndlp->vport->phba;
1803 evtp = &ndlp->els_retry_evt;
1805 spin_lock_irqsave(&phba->hbalock, flags);
1806 if (!list_empty(&evtp->evt_listp)) {
1807 spin_unlock_irqrestore(&phba->hbalock, flags);
1808 return;
1811 /* We need to hold the node by incrementing the reference
1812 * count until the queued work is done
1814 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
1815 evtp->evt = LPFC_EVT_ELS_RETRY;
1816 list_add_tail(&evtp->evt_listp, &phba->work_list);
1817 if (phba->work_wait)
1818 lpfc_worker_wake_up(phba);
1820 spin_unlock_irqrestore(&phba->hbalock, flags);
1821 return;
1824 void
1825 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1827 struct lpfc_vport *vport = ndlp->vport;
1828 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1829 uint32_t cmd, did, retry;
1831 spin_lock_irq(shost->host_lock);
1832 did = ndlp->nlp_DID;
1833 cmd = ndlp->nlp_last_elscmd;
1834 ndlp->nlp_last_elscmd = 0;
1836 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1837 spin_unlock_irq(shost->host_lock);
1838 return;
1841 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1842 spin_unlock_irq(shost->host_lock);
 1844          * If a discovery event re-added nlp_delayfunc after the timer
 1845          * fired and before processing the timer, cancel the
 1846          * nlp_delayfunc.
1848 del_timer_sync(&ndlp->nlp_delayfunc);
1849 retry = ndlp->nlp_retry;
1851 switch (cmd) {
1852 case ELS_CMD_FLOGI:
1853 lpfc_issue_els_flogi(vport, ndlp, retry);
1854 break;
1855 case ELS_CMD_PLOGI:
1856 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
1857 ndlp->nlp_prev_state = ndlp->nlp_state;
1858 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1860 break;
1861 case ELS_CMD_ADISC:
1862 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
1863 ndlp->nlp_prev_state = ndlp->nlp_state;
1864 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1866 break;
1867 case ELS_CMD_PRLI:
1868 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
1869 ndlp->nlp_prev_state = ndlp->nlp_state;
1870 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1872 break;
1873 case ELS_CMD_LOGO:
1874 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
1875 ndlp->nlp_prev_state = ndlp->nlp_state;
1876 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1878 break;
1879 case ELS_CMD_FDISC:
1880 lpfc_issue_els_fdisc(vport, ndlp, retry);
1881 break;
1883 return;
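/* lpfc_els_retry - Decide whether a failed ELS command should be retried.
 * Examines the completion status (local reject, NPort/fabric reject or busy,
 * LS_RJT reason codes) to pick a retry limit and optional delay.  Returns
 * nonzero if the command is being retried, 0 otherwise.
 */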
1886 static int
1887 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1888 struct lpfc_iocbq *rspiocb)
1890 struct lpfc_vport *vport = cmdiocb->vport;
1891 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1892 IOCB_t *irsp = &rspiocb->iocb;
1893 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1894 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1895 uint32_t *elscmd;
1896 struct ls_rjt stat;
1897 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
1898 int logerr = 0;
1899 uint32_t cmd = 0;
1900 uint32_t did;
 1903         /* Note: context2 may be 0 for an internal driver abort
 1904          * of a delayed ELS command.
1907 if (pcmd && pcmd->virt) {
1908 elscmd = (uint32_t *) (pcmd->virt);
1909 cmd = *elscmd++;
1912 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
1913 did = ndlp->nlp_DID;
1914 else {
1915 /* We should only hit this case for retrying PLOGI */
1916 did = irsp->un.elsreq64.remoteID;
1917 ndlp = lpfc_findnode_did(vport, did);
1918 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
1919 && (cmd != ELS_CMD_PLOGI))
1920 return 1;
1923 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1924 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
1925 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
1927 switch (irsp->ulpStatus) {
1928 case IOSTAT_FCP_RSP_ERROR:
1929 case IOSTAT_REMOTE_STOP:
1930 break;
1932 case IOSTAT_LOCAL_REJECT:
1933 switch ((irsp->un.ulpWord[4] & 0xff)) {
1934 case IOERR_LOOP_OPEN_FAILURE:
1935 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
1936 delay = 1000;
1937 retry = 1;
1938 break;
1940 case IOERR_ILLEGAL_COMMAND:
1941 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1942 "0124 Retry illegal cmd x%x "
1943 "retry:x%x delay:x%x\n",
1944 cmd, cmdiocb->retry, delay);
1945 retry = 1;
1946 /* All commands' retry policy */
1947 maxretry = 8;
1948 if (cmdiocb->retry > 2)
1949 delay = 1000;
1950 break;
1952 case IOERR_NO_RESOURCES:
1953 logerr = 1; /* HBA out of resources */
1954 retry = 1;
1955 if (cmdiocb->retry > 100)
1956 delay = 100;
1957 maxretry = 250;
1958 break;
1960 case IOERR_ILLEGAL_FRAME:
1961 delay = 100;
1962 retry = 1;
1963 break;
1965 case IOERR_SEQUENCE_TIMEOUT:
1966 case IOERR_INVALID_RPI:
1967 retry = 1;
1968 break;
1970 break;
1972 case IOSTAT_NPORT_RJT:
1973 case IOSTAT_FABRIC_RJT:
1974 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
1975 retry = 1;
1976 break;
1978 break;
1980 case IOSTAT_NPORT_BSY:
1981 case IOSTAT_FABRIC_BSY:
1982 logerr = 1; /* Fabric / Remote NPort out of resources */
1983 retry = 1;
1984 break;
1986 case IOSTAT_LS_RJT:
1987 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
1988 /* Added for Vendor specific support:
1989 * just keep retrying for these Rsn / Exp codes.
1990 */
1991 switch (stat.un.b.lsRjtRsnCode) {
1992 case LSRJT_UNABLE_TPC:
1993 if (stat.un.b.lsRjtRsnCodeExp ==
1994 LSEXP_CMD_IN_PROGRESS) {
1995 if (cmd == ELS_CMD_PLOGI) {
1996 delay = 1000;
1997 maxretry = 48;
1999 retry = 1;
2000 break;
2002 if (cmd == ELS_CMD_PLOGI) {
2003 delay = 1000;
2004 maxretry = lpfc_max_els_tries + 1;
2005 retry = 1;
2006 break;
2008 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2009 (cmd == ELS_CMD_FDISC) &&
2010 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
2011 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2012 "0125 FDISC Failed (x%x). "
2013 "Fabric out of resources\n",
2014 stat.un.lsRjtError);
2015 lpfc_vport_set_state(vport,
2016 FC_VPORT_NO_FABRIC_RSCS);
2018 break;
2020 case LSRJT_LOGICAL_BSY:
2021 if ((cmd == ELS_CMD_PLOGI) ||
2022 (cmd == ELS_CMD_PRLI)) {
2023 delay = 1000;
2024 maxretry = 48;
2025 } else if (cmd == ELS_CMD_FDISC) {
2026 /* FDISC retry policy */
2027 maxretry = 48;
2028 if (cmdiocb->retry >= 32)
2029 delay = 1000;
2031 retry = 1;
2032 break;
2034 case LSRJT_LOGICAL_ERR:
2035 /* There are some cases where switches return this
2036 * error when they are not ready and should be returning
2037 * Logical Busy. We should delay every time.
2038 */
2039 if (cmd == ELS_CMD_FDISC &&
2040 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
2041 maxretry = 3;
2042 delay = 1000;
2043 retry = 1;
2044 break;
2046 case LSRJT_PROTOCOL_ERR:
2047 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2048 (cmd == ELS_CMD_FDISC) &&
2049 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
2050 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
2052 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2053 "0123 FDISC Failed (x%x). "
2054 "Fabric Detected Bad WWN\n",
2055 stat.un.lsRjtError);
2056 lpfc_vport_set_state(vport,
2057 FC_VPORT_FABRIC_REJ_WWN);
2059 break;
2061 break;
2063 case IOSTAT_INTERMED_RSP:
2064 case IOSTAT_BA_RJT:
2065 break;
2067 default:
2068 break;
2071 if (did == FDMI_DID)
2072 retry = 1;
2074 if ((cmd == ELS_CMD_FLOGI) &&
2075 (phba->fc_topology != TOPOLOGY_LOOP) &&
2076 !lpfc_error_lost_link(irsp)) {
2077 /* FLOGI retry policy */
2078 retry = 1;
2079 maxretry = 48;
2080 if (cmdiocb->retry >= 32)
2081 delay = 1000;
2084 if ((++cmdiocb->retry) >= maxretry) {
2085 phba->fc_stat.elsRetryExceeded++;
2086 retry = 0;
2089 if ((vport->load_flag & FC_UNLOADING) != 0)
2090 retry = 0;
2092 if (retry) {
2094 /* Retry ELS command <elsCmd> to remote NPORT <did> */
2095 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2096 "0107 Retry ELS command x%x to remote "
2097 "NPORT x%x Data: x%x x%x\n",
2098 cmd, did, cmdiocb->retry, delay);
2100 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
2101 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
2102 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
2103 /* Don't reset timer for no resources */
2105 /* If discovery / RSCN timer is running, reset it */
2106 if (timer_pending(&vport->fc_disctmo) ||
2107 (vport->fc_flag & FC_RSCN_MODE))
2108 lpfc_set_disctmo(vport);
2111 phba->fc_stat.elsXmitRetry++;
2112 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
2113 phba->fc_stat.elsDelayRetry++;
2114 ndlp->nlp_retry = cmdiocb->retry;
2116 /* delay is specified in milliseconds */
2117 mod_timer(&ndlp->nlp_delayfunc,
2118 jiffies + msecs_to_jiffies(delay));
2119 spin_lock_irq(shost->host_lock);
2120 ndlp->nlp_flag |= NLP_DELAY_TMO;
2121 spin_unlock_irq(shost->host_lock);
2123 ndlp->nlp_prev_state = ndlp->nlp_state;
2124 if (cmd == ELS_CMD_PRLI)
2125 lpfc_nlp_set_state(vport, ndlp,
2126 NLP_STE_REG_LOGIN_ISSUE);
2127 else
2128 lpfc_nlp_set_state(vport, ndlp,
2129 NLP_STE_NPR_NODE);
2130 ndlp->nlp_last_elscmd = cmd;
2132 return 1;
2134 switch (cmd) {
2135 case ELS_CMD_FLOGI:
2136 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
2137 return 1;
2138 case ELS_CMD_FDISC:
2139 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
2140 return 1;
2141 case ELS_CMD_PLOGI:
2142 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2143 ndlp->nlp_prev_state = ndlp->nlp_state;
2144 lpfc_nlp_set_state(vport, ndlp,
2145 NLP_STE_PLOGI_ISSUE);
2147 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
2148 return 1;
2149 case ELS_CMD_ADISC:
2150 ndlp->nlp_prev_state = ndlp->nlp_state;
2151 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2152 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
2153 return 1;
2154 case ELS_CMD_PRLI:
2155 ndlp->nlp_prev_state = ndlp->nlp_state;
2156 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2157 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
2158 return 1;
2159 case ELS_CMD_LOGO:
2160 ndlp->nlp_prev_state = ndlp->nlp_state;
2161 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2162 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
2163 return 1;
2166 /* No retry ELS command <elsCmd> to remote NPORT <did> */
2167 if (logerr) {
2168 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2169 "0137 No retry ELS command x%x to remote "
2170 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
2171 cmd, did, irsp->ulpStatus,
2172 irsp->un.ulpWord[4]);
2174 else {
2175 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2176 "0108 No retry ELS command x%x to remote "
2177 "NPORT x%x Retried:%d Error:x%x/%x\n",
2178 cmd, did, cmdiocb->retry, irsp->ulpStatus,
2179 irsp->un.ulpWord[4]);
2181 return 0;
2184 static int
2185 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
2187 struct lpfc_dmabuf *buf_ptr;
2189 /* Free the response before processing the command. */
2190 if (!list_empty(&buf_ptr1->list)) {
2191 list_remove_head(&buf_ptr1->list, buf_ptr,
2192 struct lpfc_dmabuf,
2193 list);
2194 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2195 kfree(buf_ptr);
2197 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2198 kfree(buf_ptr1);
2199 return 0;
2202 static int
2203 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
2205 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2206 kfree(buf_ptr);
2207 return 0;
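/*
 * lpfc_els_free_iocb - release an ELS iocb and its associated resources
 * @phba: HBA context.
 * @elsiocb: the ELS iocb to free.
 *
 * Descriptive summary of the routine below: drops the ndlp reference held in
 * context1 (handling the NLP_DEFER_RM case), frees or defers the command and
 * response DMA buffers in context2 (buffers flagged LPFC_DELAY_MEM_FREE are
 * parked on phba->elsbuf until a heartbeat has passed), frees the BPL in
 * context3, and finally returns the iocbq to the SLI layer.
 */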
2211 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
2213 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
2214 struct lpfc_nodelist *ndlp;
2216 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
2217 if (ndlp) {
2218 if (ndlp->nlp_flag & NLP_DEFER_RM) {
2219 lpfc_nlp_put(ndlp);
2221 /* If the ndlp is not being used by another discovery
2222 * thread, free it.
2223 */
2224 if (!lpfc_nlp_not_used(ndlp)) {
2225 /* If ndlp is being used by another discovery
2226 * thread, just clear NLP_DEFER_RM.
2227 */
2228 ndlp->nlp_flag &= ~NLP_DEFER_RM;
2231 else
2232 lpfc_nlp_put(ndlp);
2233 elsiocb->context1 = NULL;
2235 /* context2 = cmd, context2->next = rsp, context3 = bpl */
2236 if (elsiocb->context2) {
2237 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
2238 /* Firmware could still be in progress of DMAing the
2239 * payload, so don't free the data buffer till after
2240 * a heartbeat.
2241 */
2242 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
2243 buf_ptr = elsiocb->context2;
2244 elsiocb->context2 = NULL;
2245 if (buf_ptr) {
2246 buf_ptr1 = NULL;
2247 spin_lock_irq(&phba->hbalock);
2248 if (!list_empty(&buf_ptr->list)) {
2249 list_remove_head(&buf_ptr->list,
2250 buf_ptr1, struct lpfc_dmabuf,
2251 list);
2252 INIT_LIST_HEAD(&buf_ptr1->list);
2253 list_add_tail(&buf_ptr1->list,
2254 &phba->elsbuf);
2255 phba->elsbuf_cnt++;
2257 INIT_LIST_HEAD(&buf_ptr->list);
2258 list_add_tail(&buf_ptr->list, &phba->elsbuf);
2259 phba->elsbuf_cnt++;
2260 spin_unlock_irq(&phba->hbalock);
2262 } else {
2263 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
2264 lpfc_els_free_data(phba, buf_ptr1);
2268 if (elsiocb->context3) {
2269 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
2270 lpfc_els_free_bpl(phba, buf_ptr);
2272 lpfc_sli_release_iocbq(phba, elsiocb);
2273 return 0;
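/*
 * lpfc_cmpl_els_logo_acc - completion handler for an ACC sent in reply to LOGO
 * @phba: HBA context.
 * @cmdiocb: the ACC iocb that completed.
 * @rspiocb: the completion status iocb.
 *
 * Descriptive summary of the routine below: if the node is in NPR state it is
 * either released (when no other discovery thread holds a reference) or its
 * RPI is unregistered; the ACC iocb is then freed.
 */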
2276 static void
2277 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2278 struct lpfc_iocbq *rspiocb)
2280 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2281 struct lpfc_vport *vport = cmdiocb->vport;
2282 IOCB_t *irsp;
2284 irsp = &rspiocb->iocb;
2285 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2286 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
2287 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
2288 /* ACC to LOGO completes to NPort <nlp_DID> */
2289 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2290 "0109 ACC to LOGO completes to NPort x%x "
2291 "Data: x%x x%x x%x\n",
2292 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2293 ndlp->nlp_rpi);
2295 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
2296 /* NPort Recovery mode or node is just allocated */
2297 if (!lpfc_nlp_not_used(ndlp)) {
2298 /* If the ndlp is being used by another discovery
2299 * thread, just unregister the RPI.
2300 */
2301 lpfc_unreg_rpi(vport, ndlp);
2302 } else {
2303 /* Indicate the node has already been released; it should
2304 * not be referenced from within lpfc_els_free_iocb.
2305 */
2306 cmdiocb->context1 = NULL;
2309 lpfc_els_free_iocb(phba, cmdiocb);
2310 return;
2313 void
2314 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2316 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2317 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2319 pmb->context1 = NULL;
2320 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2321 kfree(mp);
2322 mempool_free(pmb, phba->mbox_mem_pool);
2323 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2324 lpfc_nlp_put(ndlp);
2325 /* This is the end of the default RPI cleanup logic for this
2326 * ndlp. If no other discovery threads are using this ndlp,
2327 * we should free all resources associated with it.
2328 */
2329 lpfc_nlp_not_used(ndlp);
2331 return;
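/*
 * lpfc_cmpl_els_rsp - generic completion handler for transmitted ELS responses
 * @phba: HBA context.
 * @cmdiocb: the response iocb that completed.
 * @rspiocb: the completion status iocb.
 *
 * Descriptive summary of the routine below: cleans up any mailbox attached to
 * the response, issuing REG_LOGIN (or the default-RPI cleanup variant) when an
 * ACC that requires it completed successfully; clears NLP_ACC_REGLOGIN and
 * NLP_RM_DFLT_RPI on the node; releases the node when an LS_RJT was sent and
 * no other discovery thread is using it; and frees the response iocb.
 */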
2334 static void
2335 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2336 struct lpfc_iocbq *rspiocb)
2338 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2339 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
2340 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
2341 IOCB_t *irsp;
2342 uint8_t *pcmd;
2343 LPFC_MBOXQ_t *mbox = NULL;
2344 struct lpfc_dmabuf *mp = NULL;
2345 uint32_t ls_rjt = 0;
2347 irsp = &rspiocb->iocb;
2349 if (cmdiocb->context_un.mbox)
2350 mbox = cmdiocb->context_un.mbox;
2352 /* First determine if this is a LS_RJT cmpl. Note, this callback
2353 * function can have cmdiocb->context1 (ndlp) field set to NULL.
2354 */
2355 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
2356 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
2357 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
2358 /* A LS_RJT associated with Default RPI cleanup has its own
2359 * separate code path.
2360 */
2361 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
2362 ls_rjt = 1;
2365 /* Check to see if link went down during discovery */
2366 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
2367 if (mbox) {
2368 mp = (struct lpfc_dmabuf *) mbox->context1;
2369 if (mp) {
2370 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2371 kfree(mp);
2373 mempool_free(mbox, phba->mbox_mem_pool);
2375 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
2376 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
2377 if (lpfc_nlp_not_used(ndlp)) {
2378 ndlp = NULL;
2379 /* Indicate the node has already been released;
2380 * it must not be referenced from within
2381 * the routine lpfc_els_free_iocb.
2382 */
2383 cmdiocb->context1 = NULL;
2385 goto out;
2388 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2389 "ELS rsp cmpl: status:x%x/x%x did:x%x",
2390 irsp->ulpStatus, irsp->un.ulpWord[4],
2391 cmdiocb->iocb.un.elsreq64.remoteID);
2392 /* ELS response tag <ulpIoTag> completes */
2393 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2394 "0110 ELS response tag x%x completes "
2395 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
2396 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
2397 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
2398 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2399 ndlp->nlp_rpi);
2400 if (mbox) {
2401 if ((rspiocb->iocb.ulpStatus == 0)
2402 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
2403 lpfc_unreg_rpi(vport, ndlp);
2404 /* Increment the ndlp reference count to hold the
2405 * reference for the callback function.
2406 */
2407 mbox->context2 = lpfc_nlp_get(ndlp);
2408 mbox->vport = vport;
2409 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
2410 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
2411 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
2413 else {
2414 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
2415 ndlp->nlp_prev_state = ndlp->nlp_state;
2416 lpfc_nlp_set_state(vport, ndlp,
2417 NLP_STE_REG_LOGIN_ISSUE);
2419 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
2420 != MBX_NOT_FINISHED)
2421 goto out;
2422 else
2423 /* Decrement the ndlp reference count we
2424 * set for this failed mailbox command.
2425 */
2426 lpfc_nlp_put(ndlp);
2428 /* ELS rsp: Cannot issue reg_login for <NPortid> */
2429 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2430 "0138 ELS rsp: Cannot issue reg_login for x%x "
2431 "Data: x%x x%x x%x\n",
2432 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2433 ndlp->nlp_rpi);
2435 if (lpfc_nlp_not_used(ndlp)) {
2436 ndlp = NULL;
2437 /* Indicate the node has already been released;
2438 * it must not be referenced from within
2439 * the routine lpfc_els_free_iocb.
2440 */
2441 cmdiocb->context1 = NULL;
2443 } else {
2444 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
2445 if (!lpfc_error_lost_link(irsp) &&
2446 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
2447 if (lpfc_nlp_not_used(ndlp)) {
2448 ndlp = NULL;
2449 /* Indicate the node has already been
2450 * released; it must not be referenced
2451 * from within the routine
2452 * lpfc_els_free_iocb.
2453 */
2454 cmdiocb->context1 = NULL;
2458 mp = (struct lpfc_dmabuf *) mbox->context1;
2459 if (mp) {
2460 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2461 kfree(mp);
2463 mempool_free(mbox, phba->mbox_mem_pool);
2465 out:
2466 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2467 spin_lock_irq(shost->host_lock);
2468 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
2469 spin_unlock_irq(shost->host_lock);
2471 /* If the node is not being used by another discovery thread,
2472 * and we are sending a reject, we are done with it.
2473 * Release driver reference count here and free associated
2474 * resources.
2475 */
2476 if (ls_rjt)
2477 if (lpfc_nlp_not_used(ndlp))
2478 /* Indicate the node has already been released;
2479 * it must not be referenced from within
2480 * the routine lpfc_els_free_iocb.
2481 */
2482 cmdiocb->context1 = NULL;
2485 lpfc_els_free_iocb(phba, cmdiocb);
2486 return;
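/*
 * lpfc_els_rsp_acc - build and transmit an ELS ACC response
 * @vport: virtual port the response is sent from.
 * @flag: which ACC format to build (ELS_CMD_ACC, ELS_CMD_PLOGI or
 *        ELS_CMD_PRLO).
 * @oldiocb: the received command iocb being answered (supplies the XRI).
 * @ndlp: destination node.
 * @mbox: optional mailbox to attach for REG_LOGIN processing on completion.
 *
 * Descriptive summary of the routine below: prepares the requested ACC
 * payload, selects the LOGO-ACC or generic response completion handler, and
 * queues the iocb to the ELS ring.  Returns 0 on success, 1 on failure.
 */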
2490 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
2491 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
2492 LPFC_MBOXQ_t *mbox)
2494 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2495 struct lpfc_hba *phba = vport->phba;
2496 IOCB_t *icmd;
2497 IOCB_t *oldcmd;
2498 struct lpfc_iocbq *elsiocb;
2499 struct lpfc_sli_ring *pring;
2500 struct lpfc_sli *psli;
2501 uint8_t *pcmd;
2502 uint16_t cmdsize;
2503 int rc;
2504 ELS_PKT *els_pkt_ptr;
2506 psli = &phba->sli;
2507 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2508 oldcmd = &oldiocb->iocb;
2510 switch (flag) {
2511 case ELS_CMD_ACC:
2512 cmdsize = sizeof(uint32_t);
2513 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
2514 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2515 if (!elsiocb) {
2516 spin_lock_irq(shost->host_lock);
2517 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2518 spin_unlock_irq(shost->host_lock);
2519 return 1;
2522 icmd = &elsiocb->iocb;
2523 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2524 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2525 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2526 pcmd += sizeof(uint32_t);
2528 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2529 "Issue ACC: did:x%x flg:x%x",
2530 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2531 break;
2532 case ELS_CMD_PLOGI:
2533 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
2534 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
2535 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2536 if (!elsiocb)
2537 return 1;
2539 icmd = &elsiocb->iocb;
2540 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2541 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2543 if (mbox)
2544 elsiocb->context_un.mbox = mbox;
2546 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2547 pcmd += sizeof(uint32_t);
2548 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2550 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2551 "Issue ACC PLOGI: did:x%x flg:x%x",
2552 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2553 break;
2554 case ELS_CMD_PRLO:
2555 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
2556 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
2557 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
2558 if (!elsiocb)
2559 return 1;
2561 icmd = &elsiocb->iocb;
2562 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2563 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2565 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
2566 sizeof(uint32_t) + sizeof(PRLO));
2567 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
2568 els_pkt_ptr = (ELS_PKT *) pcmd;
2569 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
2571 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2572 "Issue ACC PRLO: did:x%x flg:x%x",
2573 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2574 break;
2575 default:
2576 return 1;
2578 /* Xmit ELS ACC response tag <ulpIoTag> */
2579 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2580 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
2581 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
2582 elsiocb->iotag, elsiocb->iocb.ulpContext,
2583 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2584 ndlp->nlp_rpi);
2585 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2586 spin_lock_irq(shost->host_lock);
2587 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2588 spin_unlock_irq(shost->host_lock);
2589 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
2590 } else {
2591 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2594 phba->fc_stat.elsXmitACC++;
2595 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2596 if (rc == IOCB_ERROR) {
2597 lpfc_els_free_iocb(phba, elsiocb);
2598 return 1;
2600 return 0;
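/*
 * lpfc_els_rsp_reject - build and transmit an LS_RJT response
 * @vport: virtual port the response is sent from.
 * @rejectError: combined reason / explanation word placed in the payload.
 * @oldiocb: the received command iocb being rejected (supplies the XRI).
 * @ndlp: destination node.
 * @mbox: optional mailbox to attach to the response iocb.
 *
 * Descriptive summary of the routine below: returns 0 if the LS_RJT was
 * queued to the ELS ring, 1 on failure.
 */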
2604 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2605 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
2606 LPFC_MBOXQ_t *mbox)
2608 struct lpfc_hba *phba = vport->phba;
2609 IOCB_t *icmd;
2610 IOCB_t *oldcmd;
2611 struct lpfc_iocbq *elsiocb;
2612 struct lpfc_sli_ring *pring;
2613 struct lpfc_sli *psli;
2614 uint8_t *pcmd;
2615 uint16_t cmdsize;
2616 int rc;
2618 psli = &phba->sli;
2619 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2621 cmdsize = 2 * sizeof(uint32_t);
2622 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2623 ndlp->nlp_DID, ELS_CMD_LS_RJT);
2624 if (!elsiocb)
2625 return 1;
2627 icmd = &elsiocb->iocb;
2628 oldcmd = &oldiocb->iocb;
2629 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2630 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2632 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
2633 pcmd += sizeof(uint32_t);
2634 *((uint32_t *) (pcmd)) = rejectError;
2636 if (mbox)
2637 elsiocb->context_un.mbox = mbox;
2639 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
2640 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2641 "0129 Xmit ELS RJT x%x response tag x%x "
2642 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2643 "rpi x%x\n",
2644 rejectError, elsiocb->iotag,
2645 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2646 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2647 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2648 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
2649 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
2651 phba->fc_stat.elsXmitLSRJT++;
2652 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2653 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2655 if (rc == IOCB_ERROR) {
2656 lpfc_els_free_iocb(phba, elsiocb);
2657 return 1;
2659 return 0;
2663 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2664 struct lpfc_nodelist *ndlp)
2666 struct lpfc_hba *phba = vport->phba;
2667 struct lpfc_sli *psli = &phba->sli;
2668 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2669 ADISC *ap;
2670 IOCB_t *icmd, *oldcmd;
2671 struct lpfc_iocbq *elsiocb;
2672 uint8_t *pcmd;
2673 uint16_t cmdsize;
2674 int rc;
2676 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2677 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2678 ndlp->nlp_DID, ELS_CMD_ACC);
2679 if (!elsiocb)
2680 return 1;
2682 icmd = &elsiocb->iocb;
2683 oldcmd = &oldiocb->iocb;
2684 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2686 /* Xmit ADISC ACC response tag <ulpIoTag> */
2687 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2688 "0130 Xmit ADISC ACC response iotag x%x xri: "
2689 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
2690 elsiocb->iotag, elsiocb->iocb.ulpContext,
2691 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2692 ndlp->nlp_rpi);
2693 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2695 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2696 pcmd += sizeof(uint32_t);
2698 ap = (ADISC *) (pcmd);
2699 ap->hardAL_PA = phba->fc_pref_ALPA;
2700 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2701 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2702 ap->DID = be32_to_cpu(vport->fc_myDID);
2704 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2705 "Issue ACC ADISC: did:x%x flg:x%x",
2706 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2708 phba->fc_stat.elsXmitACC++;
2709 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2710 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2711 if (rc == IOCB_ERROR) {
2712 lpfc_els_free_iocb(phba, elsiocb);
2713 return 1;
2715 return 0;
2719 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2720 struct lpfc_nodelist *ndlp)
2722 struct lpfc_hba *phba = vport->phba;
2723 PRLI *npr;
2724 lpfc_vpd_t *vpd;
2725 IOCB_t *icmd;
2726 IOCB_t *oldcmd;
2727 struct lpfc_iocbq *elsiocb;
2728 struct lpfc_sli_ring *pring;
2729 struct lpfc_sli *psli;
2730 uint8_t *pcmd;
2731 uint16_t cmdsize;
2732 int rc;
2734 psli = &phba->sli;
2735 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2737 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2738 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2739 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
2740 if (!elsiocb)
2741 return 1;
2743 icmd = &elsiocb->iocb;
2744 oldcmd = &oldiocb->iocb;
2745 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2746 /* Xmit PRLI ACC response tag <ulpIoTag> */
2747 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2748 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
2749 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2750 elsiocb->iotag, elsiocb->iocb.ulpContext,
2751 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2752 ndlp->nlp_rpi);
2753 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2755 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2756 pcmd += sizeof(uint32_t);
2758 /* For PRLI, remainder of payload is PRLI parameter page */
2759 memset(pcmd, 0, sizeof(PRLI));
2761 npr = (PRLI *) pcmd;
2762 vpd = &phba->vpd;
2763 /*
2764 * If our firmware version is 3.20 or later,
2765 * set the following bits for FC-TAPE support.
2766 */
2767 if (vpd->rev.feaLevelHigh >= 0x02) {
2768 npr->ConfmComplAllowed = 1;
2769 npr->Retry = 1;
2770 npr->TaskRetryIdReq = 1;
2773 npr->acceptRspCode = PRLI_REQ_EXECUTED;
2774 npr->estabImagePair = 1;
2775 npr->readXferRdyDis = 1;
2776 npr->ConfmComplAllowed = 1;
2778 npr->prliType = PRLI_FCP_TYPE;
2779 npr->initiatorFunc = 1;
2781 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2782 "Issue ACC PRLI: did:x%x flg:x%x",
2783 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2785 phba->fc_stat.elsXmitACC++;
2786 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2788 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2789 if (rc == IOCB_ERROR) {
2790 lpfc_els_free_iocb(phba, elsiocb);
2791 return 1;
2793 return 0;
2796 static int
2797 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
2798 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
2800 struct lpfc_hba *phba = vport->phba;
2801 RNID *rn;
2802 IOCB_t *icmd, *oldcmd;
2803 struct lpfc_iocbq *elsiocb;
2804 struct lpfc_sli_ring *pring;
2805 struct lpfc_sli *psli;
2806 uint8_t *pcmd;
2807 uint16_t cmdsize;
2808 int rc;
2810 psli = &phba->sli;
2811 pring = &psli->ring[LPFC_ELS_RING];
2813 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
2814 + (2 * sizeof(struct lpfc_name));
2815 if (format)
2816 cmdsize += sizeof(RNID_TOP_DISC);
2818 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2819 ndlp->nlp_DID, ELS_CMD_ACC);
2820 if (!elsiocb)
2821 return 1;
2823 icmd = &elsiocb->iocb;
2824 oldcmd = &oldiocb->iocb;
2825 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2826 /* Xmit RNID ACC response tag <ulpIoTag> */
2827 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2828 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
2829 elsiocb->iotag, elsiocb->iocb.ulpContext);
2830 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2831 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2832 pcmd += sizeof(uint32_t);
2834 memset(pcmd, 0, sizeof(RNID));
2835 rn = (RNID *) (pcmd);
2836 rn->Format = format;
2837 rn->CommonLen = (2 * sizeof(struct lpfc_name));
2838 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2839 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2840 switch (format) {
2841 case 0:
2842 rn->SpecificLen = 0;
2843 break;
2844 case RNID_TOPOLOGY_DISC:
2845 rn->SpecificLen = sizeof(RNID_TOP_DISC);
2846 memcpy(&rn->un.topologyDisc.portName,
2847 &vport->fc_portname, sizeof(struct lpfc_name));
2848 rn->un.topologyDisc.unitType = RNID_HBA;
2849 rn->un.topologyDisc.physPort = 0;
2850 rn->un.topologyDisc.attachedNodes = 0;
2851 break;
2852 default:
2853 rn->CommonLen = 0;
2854 rn->SpecificLen = 0;
2855 break;
2858 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2859 "Issue ACC RNID: did:x%x flg:x%x",
2860 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2862 phba->fc_stat.elsXmitACC++;
2863 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2864 lpfc_nlp_put(ndlp);
2865 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2866 * it could be freed */
2868 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2869 if (rc == IOCB_ERROR) {
2870 lpfc_els_free_iocb(phba, elsiocb);
2871 return 1;
2873 return 0;
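/*
 * lpfc_els_disc_adisc - issue ADISCs to NPR nodes still marked for discovery
 * @vport: virtual port whose node list is walked.
 *
 * Descriptive summary of the routine below: sends ADISC to each NPR node
 * flagged NLP_NPR_2B_DISC and NLP_NPR_ADISC, stopping (and setting
 * FC_NLP_MORE) once cfg_discovery_threads commands are outstanding.
 * Returns the number of ADISCs issued.
 */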
2877 lpfc_els_disc_adisc(struct lpfc_vport *vport)
2879 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2880 struct lpfc_nodelist *ndlp, *next_ndlp;
2881 int sentadisc = 0;
2883 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2884 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2885 if (!NLP_CHK_NODE_ACT(ndlp))
2886 continue;
2887 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2888 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2889 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2890 spin_lock_irq(shost->host_lock);
2891 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2892 spin_unlock_irq(shost->host_lock);
2893 ndlp->nlp_prev_state = ndlp->nlp_state;
2894 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2895 lpfc_issue_els_adisc(vport, ndlp, 0);
2896 sentadisc++;
2897 vport->num_disc_nodes++;
2898 if (vport->num_disc_nodes >=
2899 vport->cfg_discovery_threads) {
2900 spin_lock_irq(shost->host_lock);
2901 vport->fc_flag |= FC_NLP_MORE;
2902 spin_unlock_irq(shost->host_lock);
2903 break;
2907 if (sentadisc == 0) {
2908 spin_lock_irq(shost->host_lock);
2909 vport->fc_flag &= ~FC_NLP_MORE;
2910 spin_unlock_irq(shost->host_lock);
2912 return sentadisc;
2916 lpfc_els_disc_plogi(struct lpfc_vport *vport)
2918 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2919 struct lpfc_nodelist *ndlp, *next_ndlp;
2920 int sentplogi = 0;
2922 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
2923 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2924 if (!NLP_CHK_NODE_ACT(ndlp))
2925 continue;
2926 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2927 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2928 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
2929 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
2930 ndlp->nlp_prev_state = ndlp->nlp_state;
2931 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2932 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2933 sentplogi++;
2934 vport->num_disc_nodes++;
2935 if (vport->num_disc_nodes >=
2936 vport->cfg_discovery_threads) {
2937 spin_lock_irq(shost->host_lock);
2938 vport->fc_flag |= FC_NLP_MORE;
2939 spin_unlock_irq(shost->host_lock);
2940 break;
2944 if (sentplogi) {
2945 lpfc_set_disctmo(vport);
2947 else {
2948 spin_lock_irq(shost->host_lock);
2949 vport->fc_flag &= ~FC_NLP_MORE;
2950 spin_unlock_irq(shost->host_lock);
2952 return sentplogi;
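/*
 * lpfc_els_flush_rscn - discard all pending RSCN payloads on a vport
 * @vport: virtual port whose fc_rscn_id_list is flushed.
 *
 * Descriptive summary of the routine below: returns immediately if another
 * thread already holds the fc_rscn_flush token; otherwise frees every saved
 * RSCN buffer, clears FC_RSCN_MODE / FC_RSCN_DISCOVERY and cancels the
 * discovery timer.
 */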
2955 void
2956 lpfc_els_flush_rscn(struct lpfc_vport *vport)
2958 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2959 struct lpfc_hba *phba = vport->phba;
2960 int i;
2962 spin_lock_irq(shost->host_lock);
2963 if (vport->fc_rscn_flush) {
2964 /* Another thread is walking fc_rscn_id_list on this vport */
2965 spin_unlock_irq(shost->host_lock);
2966 return;
2968 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
2969 vport->fc_rscn_flush = 1;
2970 spin_unlock_irq(shost->host_lock);
2972 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2973 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2974 vport->fc_rscn_id_list[i] = NULL;
2976 spin_lock_irq(shost->host_lock);
2977 vport->fc_rscn_id_cnt = 0;
2978 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2979 spin_unlock_irq(shost->host_lock);
2980 lpfc_can_disctmo(vport);
2981 /* Indicate we are done walking this fc_rscn_id_list */
2982 vport->fc_rscn_flush = 0;
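/*
 * lpfc_rscn_payload_check - test whether a DID matches any pending RSCN entry
 * @vport: virtual port holding the saved RSCN payloads.
 * @did: the N_Port ID to check.
 *
 * Descriptive summary of the routine below: walks fc_rscn_id_list and matches
 * the DID against single-port, area, domain or full-fabric RSCN entries.
 * Returns the DID on a match (or when FC_RSCN_DISCOVERY forces full
 * rediscovery) and 0 otherwise; fabric DIDs never match.
 */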
2986 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2988 D_ID ns_did;
2989 D_ID rscn_did;
2990 uint32_t *lp;
2991 uint32_t payload_len, i;
2992 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2994 ns_did.un.word = did;
2996 /* Never match fabric nodes for RSCNs */
2997 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2998 return 0;
3000 /* If we are doing a FULL RSCN rediscovery, match everything */
3001 if (vport->fc_flag & FC_RSCN_DISCOVERY)
3002 return did;
3004 spin_lock_irq(shost->host_lock);
3005 if (vport->fc_rscn_flush) {
3006 /* Another thread is walking fc_rscn_id_list on this vport */
3007 spin_unlock_irq(shost->host_lock);
3008 return 0;
3010 /* Indicate we are walking fc_rscn_id_list on this vport */
3011 vport->fc_rscn_flush = 1;
3012 spin_unlock_irq(shost->host_lock);
3013 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
3014 lp = vport->fc_rscn_id_list[i]->virt;
3015 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
3016 payload_len -= sizeof(uint32_t); /* take off word 0 */
3017 while (payload_len) {
3018 rscn_did.un.word = be32_to_cpu(*lp++);
3019 payload_len -= sizeof(uint32_t);
3020 switch (rscn_did.un.b.resv) {
3021 case 0: /* Single N_Port ID affected */
3022 if (ns_did.un.word == rscn_did.un.word)
3023 goto return_did_out;
3024 break;
3025 case 1: /* Whole N_Port Area affected */
3026 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3027 && (ns_did.un.b.area == rscn_did.un.b.area))
3028 goto return_did_out;
3029 break;
3030 case 2: /* Whole N_Port Domain affected */
3031 if (ns_did.un.b.domain == rscn_did.un.b.domain)
3032 goto return_did_out;
3033 break;
3034 default:
3035 /* Unknown Identifier in RSCN node */
3036 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3037 "0217 Unknown Identifier in "
3038 "RSCN payload Data: x%x\n",
3039 rscn_did.un.word);
3040 case 3: /* Whole Fabric affected */
3041 goto return_did_out;
3045 /* Indicate we are done with walking fc_rscn_id_list on this vport */
3046 vport->fc_rscn_flush = 0;
3047 return 0;
3048 return_did_out:
3049 /* Indicate we are done with walking fc_rscn_id_list on this vport */
3050 vport->fc_rscn_flush = 0;
3051 return did;
3054 static int
3055 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
3057 struct lpfc_nodelist *ndlp = NULL;
3059 /* Look at all nodes affected by pending RSCNs and move
3060 * them to NPR state.
3061 */
3063 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3064 if (!NLP_CHK_NODE_ACT(ndlp) ||
3065 ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
3066 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
3067 continue;
3069 lpfc_disc_state_machine(vport, ndlp, NULL,
3070 NLP_EVT_DEVICE_RECOVERY);
3072 /*
3073 * Make sure NLP_DELAY_TMO is NOT running after a device
3074 * recovery event.
3075 */
3076 if (ndlp->nlp_flag & NLP_DELAY_TMO)
3077 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3080 return 0;
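/*
 * lpfc_els_rcv_rscn - handle a received RSCN ELS command
 * @vport: virtual port that received the RSCN.
 * @cmdiocb: the unsolicited RSCN iocb (payload in context2).
 * @ndlp: node the RSCN was received from.
 *
 * Descriptive summary of the routine below: the RSCN is ACC'd immediately
 * when discovery has not started yet or when the payload only names other
 * vports on this HBA.  Otherwise the payload is either appended to
 * fc_rscn_id_list for deferred processing (if an RSCN or discovery is
 * already active) or processed right away via lpfc_els_handle_rscn(),
 * after marking affected nodes for recovery.
 */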
3083 static int
3084 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3085 struct lpfc_nodelist *ndlp)
3087 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3088 struct lpfc_hba *phba = vport->phba;
3089 struct lpfc_dmabuf *pcmd;
3090 uint32_t *lp, *datap;
3091 IOCB_t *icmd;
3092 uint32_t payload_len, length, nportid, *cmd;
3093 int rscn_cnt;
3094 int rscn_id = 0, hba_id = 0;
3095 int i;
3097 icmd = &cmdiocb->iocb;
3098 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3099 lp = (uint32_t *) pcmd->virt;
3101 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
3102 payload_len -= sizeof(uint32_t); /* take off word 0 */
3103 /* RSCN received */
3104 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3105 "0214 RSCN received Data: x%x x%x x%x x%x\n",
3106 vport->fc_flag, payload_len, *lp,
3107 vport->fc_rscn_id_cnt);
3108 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
3109 fc_host_post_event(shost, fc_get_event_number(),
3110 FCH_EVT_RSCN, lp[i]);
3112 /* If we are about to begin discovery, just ACC the RSCN.
3113 * Discovery processing will satisfy it.
3114 */
3115 if (vport->port_state <= LPFC_NS_QRY) {
3116 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3117 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
3118 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
3120 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3121 return 0;
3124 /* If this RSCN just contains NPortIDs for other vports on this HBA,
3125 * just ACC and ignore it.
3126 */
3127 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3128 !(vport->cfg_peer_port_login)) {
3129 i = payload_len;
3130 datap = lp;
3131 while (i > 0) {
3132 nportid = *datap++;
3133 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
3134 i -= sizeof(uint32_t);
3135 rscn_id++;
3136 if (lpfc_find_vport_by_did(phba, nportid))
3137 hba_id++;
3139 if (rscn_id == hba_id) {
3140 /* ALL NPortIDs in RSCN are on HBA */
3141 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3142 "0214 Ignore RSCN "
3143 "Data: x%x x%x x%x x%x\n",
3144 vport->fc_flag, payload_len,
3145 *lp, vport->fc_rscn_id_cnt);
3146 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3147 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
3148 ndlp->nlp_DID, vport->port_state,
3149 ndlp->nlp_flag);
3151 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
3152 ndlp, NULL);
3153 return 0;
3157 spin_lock_irq(shost->host_lock);
3158 if (vport->fc_rscn_flush) {
3159 /* Another thread is walking fc_rscn_id_list on this vport */
3160 spin_unlock_irq(shost->host_lock);
3161 vport->fc_flag |= FC_RSCN_DISCOVERY;
3162 /* Send back ACC */
3163 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3164 return 0;
3166 /* Indicate we are walking fc_rscn_id_list on this vport */
3167 vport->fc_rscn_flush = 1;
3168 spin_unlock_irq(shost->host_lock);
3169 /* Get the array count after successfully obtaining the token */
3170 rscn_cnt = vport->fc_rscn_id_cnt;
3171 /* If we are already processing an RSCN, save the received
3172 * RSCN payload buffer (cmdiocb->context2) to process later.
3173 */
3174 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
3175 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3176 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
3177 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
3179 spin_lock_irq(shost->host_lock);
3180 vport->fc_flag |= FC_RSCN_DEFERRED;
3181 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
3182 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
3183 vport->fc_flag |= FC_RSCN_MODE;
3184 spin_unlock_irq(shost->host_lock);
3185 if (rscn_cnt) {
3186 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
3187 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
3189 if ((rscn_cnt) &&
3190 (payload_len + length <= LPFC_BPL_SIZE)) {
3191 *cmd &= ELS_CMD_MASK;
3192 *cmd |= cpu_to_be32(payload_len + length);
3193 memcpy(((uint8_t *)cmd) + length, lp,
3194 payload_len);
3195 } else {
3196 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
3197 vport->fc_rscn_id_cnt++;
3198 /* If we zero cmdiocb->context2, the calling
3199 * routine will not try to free it.
3200 */
3201 cmdiocb->context2 = NULL;
3203 /* Deferred RSCN */
3204 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3205 "0235 Deferred RSCN "
3206 "Data: x%x x%x x%x\n",
3207 vport->fc_rscn_id_cnt, vport->fc_flag,
3208 vport->port_state);
3209 } else {
3210 vport->fc_flag |= FC_RSCN_DISCOVERY;
3211 spin_unlock_irq(shost->host_lock);
3212 /* ReDiscovery RSCN */
3213 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3214 "0234 ReDiscovery RSCN "
3215 "Data: x%x x%x x%x\n",
3216 vport->fc_rscn_id_cnt, vport->fc_flag,
3217 vport->port_state);
3219 /* Indicate we are done walking fc_rscn_id_list on this vport */
3220 vport->fc_rscn_flush = 0;
3221 /* Send back ACC */
3222 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3223 /* send RECOVERY event for ALL nodes that match RSCN payload */
3224 lpfc_rscn_recovery_check(vport);
3225 spin_lock_irq(shost->host_lock);
3226 vport->fc_flag &= ~FC_RSCN_DEFERRED;
3227 spin_unlock_irq(shost->host_lock);
3228 return 0;
3230 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3231 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
3232 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
3234 spin_lock_irq(shost->host_lock);
3235 vport->fc_flag |= FC_RSCN_MODE;
3236 spin_unlock_irq(shost->host_lock);
3237 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
3238 /* Indicate we are done walking fc_rscn_id_list on this vport */
3239 vport->fc_rscn_flush = 0;
3240 /*
3241 * If we zero cmdiocb->context2, the calling routine will
3242 * not try to free it.
3243 */
3244 cmdiocb->context2 = NULL;
3245 lpfc_set_disctmo(vport);
3246 /* Send back ACC */
3247 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3248 /* send RECOVERY event for ALL nodes that match RSCN payload */
3249 lpfc_rscn_recovery_check(vport);
3250 return lpfc_els_handle_rscn(vport);
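/*
 * lpfc_els_handle_rscn - start processing of accepted RSCN payloads
 * @vport: virtual port with RSCN state to process.
 *
 * Descriptive summary of the routine below: restarts the discovery timer and
 * issues a GID_FT query to the NameServer (logging in to it first if
 * necessary) so the RSCN data can be compared against the fabric.  Returns 1
 * while waiting on the NameServer, 0 when the RSCN processing was completed
 * or abandoned.
 */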
3254 lpfc_els_handle_rscn(struct lpfc_vport *vport)
3256 struct lpfc_nodelist *ndlp;
3257 struct lpfc_hba *phba = vport->phba;
3259 /* Ignore RSCN if the port is being torn down. */
3260 if (vport->load_flag & FC_UNLOADING) {
3261 lpfc_els_flush_rscn(vport);
3262 return 0;
3265 /* Start timer for RSCN processing */
3266 lpfc_set_disctmo(vport);
3268 /* RSCN processed */
3269 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3270 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
3271 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
3272 vport->port_state);
3274 /* To process RSCN, first compare RSCN data with NameServer */
3275 vport->fc_ns_retry = 0;
3276 vport->num_disc_nodes = 0;
3278 ndlp = lpfc_findnode_did(vport, NameServer_DID);
3279 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
3280 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
3281 /* Good ndlp, issue CT Request to NameServer */
3282 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
3283 /* Wait for NameServer query cmpl before we can
3284 continue */
3285 return 1;
3286 } else {
3287 /* If login to NameServer does not exist, issue one */
3288 /* Good status, issue PLOGI to NameServer */
3289 ndlp = lpfc_findnode_did(vport, NameServer_DID);
3290 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3291 /* Wait for NameServer login cmpl before we can
3292 continue */
3293 return 1;
3295 if (ndlp) {
3296 ndlp = lpfc_enable_node(vport, ndlp,
3297 NLP_STE_PLOGI_ISSUE);
3298 if (!ndlp) {
3299 lpfc_els_flush_rscn(vport);
3300 return 0;
3302 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
3303 } else {
3304 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3305 if (!ndlp) {
3306 lpfc_els_flush_rscn(vport);
3307 return 0;
3309 lpfc_nlp_init(vport, ndlp, NameServer_DID);
3310 ndlp->nlp_prev_state = ndlp->nlp_state;
3311 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3313 ndlp->nlp_type |= NLP_FABRIC;
3314 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
3315 /* Wait for NameServer login cmpl before we can
3316 * continue.
3317 */
3318 return 1;
3321 lpfc_els_flush_rscn(vport);
3322 return 0;
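/*
 * lpfc_els_rcv_flogi - handle a received FLOGI ELS command
 * @vport: virtual port that received the FLOGI.
 * @cmdiocb: the unsolicited FLOGI iocb.
 * @ndlp: node the FLOGI was received from.
 *
 * Descriptive summary of the routine below: FLOGIs received in loop mode are
 * logged and ignored, and ones with invalid service parameters are rejected
 * with LS_RJT.  Otherwise the port names are compared to decide which side
 * initiates PLOGI on the resulting point-to-point link, the FC_PT2PT flags
 * are set, and the FLOGI is ACC'd.
 */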
3325 static int
3326 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3327 struct lpfc_nodelist *ndlp)
3329 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3330 struct lpfc_hba *phba = vport->phba;
3331 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3332 uint32_t *lp = (uint32_t *) pcmd->virt;
3333 IOCB_t *icmd = &cmdiocb->iocb;
3334 struct serv_parm *sp;
3335 LPFC_MBOXQ_t *mbox;
3336 struct ls_rjt stat;
3337 uint32_t cmd, did;
3338 int rc;
3340 cmd = *lp++;
3341 sp = (struct serv_parm *) lp;
3343 /* FLOGI received */
3345 lpfc_set_disctmo(vport);
3347 if (phba->fc_topology == TOPOLOGY_LOOP) {
3348 /* We should never receive a FLOGI in loop mode, ignore it */
3349 did = icmd->un.elsreq64.remoteID;
3351 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
3352 Loop Mode */
3353 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3354 "0113 An FLOGI ELS command x%x was "
3355 "received from DID x%x in Loop Mode\n",
3356 cmd, did);
3357 return 1;
3360 did = Fabric_DID;
3362 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
3363 /* For a FLOGI we accept, then if our portname is greater
3364 * than the remote portname, we initiate Nport login.
3365 */
3367 rc = memcmp(&vport->fc_portname, &sp->portName,
3368 sizeof(struct lpfc_name));
3370 if (!rc) {
3371 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3372 if (!mbox)
3373 return 1;
3375 lpfc_linkdown(phba);
3376 lpfc_init_link(phba, mbox,
3377 phba->cfg_topology,
3378 phba->cfg_link_speed);
3379 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
3380 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3381 mbox->vport = vport;
3382 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3383 lpfc_set_loopback_flag(phba);
3384 if (rc == MBX_NOT_FINISHED) {
3385 mempool_free(mbox, phba->mbox_mem_pool);
3387 return 1;
3388 } else if (rc > 0) { /* greater than */
3389 spin_lock_irq(shost->host_lock);
3390 vport->fc_flag |= FC_PT2PT_PLOGI;
3391 spin_unlock_irq(shost->host_lock);
3393 spin_lock_irq(shost->host_lock);
3394 vport->fc_flag |= FC_PT2PT;
3395 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3396 spin_unlock_irq(shost->host_lock);
3397 } else {
3398 /* Reject this request because invalid parameters */
3399 stat.un.b.lsRjtRsvd0 = 0;
3400 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3401 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
3402 stat.un.b.vendorUnique = 0;
3403 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3404 NULL);
3405 return 1;
3408 /* Send back ACC */
3409 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
3411 return 0;
3414 static int
3415 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3416 struct lpfc_nodelist *ndlp)
3418 struct lpfc_dmabuf *pcmd;
3419 uint32_t *lp;
3420 IOCB_t *icmd;
3421 RNID *rn;
3422 struct ls_rjt stat;
3423 uint32_t cmd, did;
3425 icmd = &cmdiocb->iocb;
3426 did = icmd->un.elsreq64.remoteID;
3427 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3428 lp = (uint32_t *) pcmd->virt;
3430 cmd = *lp++;
3431 rn = (RNID *) lp;
3433 /* RNID received */
3435 switch (rn->Format) {
3436 case 0:
3437 case RNID_TOPOLOGY_DISC:
3438 /* Send back ACC */
3439 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
3440 break;
3441 default:
3442 /* Reject this request because format not supported */
3443 stat.un.b.lsRjtRsvd0 = 0;
3444 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3445 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3446 stat.un.b.vendorUnique = 0;
3447 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3448 NULL);
3450 return 0;
3453 static int
3454 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3455 struct lpfc_nodelist *ndlp)
3457 struct ls_rjt stat;
3459 /* For now, unconditionally reject this command */
3460 stat.un.b.lsRjtRsvd0 = 0;
3461 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3462 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3463 stat.un.b.vendorUnique = 0;
3464 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
3465 return 0;
3468 static void
3469 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3471 struct lpfc_sli *psli = &phba->sli;
3472 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3473 MAILBOX_t *mb;
3474 IOCB_t *icmd;
3475 RPS_RSP *rps_rsp;
3476 uint8_t *pcmd;
3477 struct lpfc_iocbq *elsiocb;
3478 struct lpfc_nodelist *ndlp;
3479 uint16_t xri, status;
3480 uint32_t cmdsize;
3482 mb = &pmb->mb;
3484 ndlp = (struct lpfc_nodelist *) pmb->context2;
3485 xri = (uint16_t) ((unsigned long)(pmb->context1));
3486 pmb->context1 = NULL;
3487 pmb->context2 = NULL;
3489 if (mb->mbxStatus) {
3490 mempool_free(pmb, phba->mbox_mem_pool);
3491 return;
3494 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
3495 mempool_free(pmb, phba->mbox_mem_pool);
3496 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
3497 lpfc_max_els_tries, ndlp,
3498 ndlp->nlp_DID, ELS_CMD_ACC);
3500 /* Decrement the ndlp reference count from previous mbox command */
3501 lpfc_nlp_put(ndlp);
3503 if (!elsiocb)
3504 return;
3506 icmd = &elsiocb->iocb;
3507 icmd->ulpContext = xri;
3509 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3510 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3511 pcmd += sizeof(uint32_t); /* Skip past command */
3512 rps_rsp = (RPS_RSP *)pcmd;
3514 if (phba->fc_topology != TOPOLOGY_LOOP)
3515 status = 0x10;
3516 else
3517 status = 0x8;
3518 if (phba->pport->fc_flag & FC_FABRIC)
3519 status |= 0x4;
3521 rps_rsp->rsvd1 = 0;
3522 rps_rsp->portStatus = cpu_to_be16(status);
3523 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
3524 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
3525 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
3526 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
3527 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
3528 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
3529 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
3530 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
3531 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
3532 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3533 elsiocb->iotag, elsiocb->iocb.ulpContext,
3534 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3535 ndlp->nlp_rpi);
3536 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3537 phba->fc_stat.elsXmitACC++;
3538 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
3539 lpfc_els_free_iocb(phba, elsiocb);
3540 return;
3543 static int
3544 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3545 struct lpfc_nodelist *ndlp)
3547 struct lpfc_hba *phba = vport->phba;
3548 uint32_t *lp;
3549 uint8_t flag;
3550 LPFC_MBOXQ_t *mbox;
3551 struct lpfc_dmabuf *pcmd;
3552 RPS *rps;
3553 struct ls_rjt stat;
3555 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3556 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
3557 stat.un.b.lsRjtRsvd0 = 0;
3558 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3559 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3560 stat.un.b.vendorUnique = 0;
3561 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3562 NULL);
3565 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3566 lp = (uint32_t *) pcmd->virt;
3567 flag = (be32_to_cpu(*lp++) & 0xf);
3568 rps = (RPS *) lp;
3570 if ((flag == 0) ||
3571 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
3572 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
3573 sizeof(struct lpfc_name)) == 0))) {
3575 printk("Fix me....\n");
3576 dump_stack();
3577 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
3578 if (mbox) {
3579 lpfc_read_lnk_stat(phba, mbox);
3580 mbox->context1 =
3581 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
3582 mbox->context2 = lpfc_nlp_get(ndlp);
3583 mbox->vport = vport;
3584 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
3585 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3586 != MBX_NOT_FINISHED)
3587 /* Mbox completion will send ELS Response */
3588 return 0;
3589 /* Decrement reference count used for the failed mbox
3590 * command.
3591 */
3592 lpfc_nlp_put(ndlp);
3593 mempool_free(mbox, phba->mbox_mem_pool);
3596 stat.un.b.lsRjtRsvd0 = 0;
3597 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3598 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3599 stat.un.b.vendorUnique = 0;
3600 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
3601 return 0;
3604 static int
3605 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
3606 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
3608 struct lpfc_hba *phba = vport->phba;
3609 IOCB_t *icmd, *oldcmd;
3610 RPL_RSP rpl_rsp;
3611 struct lpfc_iocbq *elsiocb;
3612 struct lpfc_sli *psli = &phba->sli;
3613 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3614 uint8_t *pcmd;
3616 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3617 ndlp->nlp_DID, ELS_CMD_ACC);
3619 if (!elsiocb)
3620 return 1;
3622 icmd = &elsiocb->iocb;
3623 oldcmd = &oldiocb->iocb;
3624 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3626 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3627 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3628 pcmd += sizeof(uint16_t);
3629 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
3630 pcmd += sizeof(uint16_t);
3632 /* Setup the RPL ACC payload */
3633 rpl_rsp.listLen = be32_to_cpu(1);
3634 rpl_rsp.index = 0;
3635 rpl_rsp.port_num_blk.portNum = 0;
3636 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
3637 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
3638 sizeof(struct lpfc_name));
3639 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
3640 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
3641 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3642 "0120 Xmit ELS RPL ACC response tag x%x "
3643 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3644 "rpi x%x\n",
3645 elsiocb->iotag, elsiocb->iocb.ulpContext,
3646 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3647 ndlp->nlp_rpi);
3648 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3649 phba->fc_stat.elsXmitACC++;
3650 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
3651 lpfc_els_free_iocb(phba, elsiocb);
3652 return 1;
3654 return 0;
3657 static int
3658 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3659 struct lpfc_nodelist *ndlp)
3661 struct lpfc_dmabuf *pcmd;
3662 uint32_t *lp;
3663 uint32_t maxsize;
3664 uint16_t cmdsize;
3665 RPL *rpl;
3666 struct ls_rjt stat;
3668 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3669 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
3670 stat.un.b.lsRjtRsvd0 = 0;
3671 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3672 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3673 stat.un.b.vendorUnique = 0;
3674 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3675 NULL);
3678 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3679 lp = (uint32_t *) pcmd->virt;
3680 rpl = (RPL *) (lp + 1);
3682 maxsize = be32_to_cpu(rpl->maxsize);
3684 /* We support only one port */
3685 if ((rpl->index == 0) &&
3686 ((maxsize == 0) ||
3687 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
3688 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
3689 } else {
3690 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
3692 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
3694 return 0;
3697 static int
3698 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3699 struct lpfc_nodelist *ndlp)
3701 struct lpfc_dmabuf *pcmd;
3702 uint32_t *lp;
3703 IOCB_t *icmd;
3704 FARP *fp;
3705 uint32_t cmd, cnt, did;
3707 icmd = &cmdiocb->iocb;
3708 did = icmd->un.elsreq64.remoteID;
3709 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3710 lp = (uint32_t *) pcmd->virt;
3712 cmd = *lp++;
3713 fp = (FARP *) lp;
3714 /* FARP-REQ received from DID <did> */
3715 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3716 "0601 FARP-REQ received from DID x%x\n", did);
3717 /* We will only support match on WWPN or WWNN */
3718 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
3719 return 0;
3722 cnt = 0;
3723 /* If this FARP command is searching for my portname */
3724 if (fp->Mflags & FARP_MATCH_PORT) {
3725 if (memcmp(&fp->RportName, &vport->fc_portname,
3726 sizeof(struct lpfc_name)) == 0)
3727 cnt = 1;
3730 /* If this FARP command is searching for my nodename */
3731 if (fp->Mflags & FARP_MATCH_NODE) {
3732 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
3733 sizeof(struct lpfc_name)) == 0)
3734 cnt = 1;
3737 if (cnt) {
3738 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
3739 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
3740 /* Log back into the node before sending the FARP. */
3741 if (fp->Rflags & FARP_REQUEST_PLOGI) {
3742 ndlp->nlp_prev_state = ndlp->nlp_state;
3743 lpfc_nlp_set_state(vport, ndlp,
3744 NLP_STE_PLOGI_ISSUE);
3745 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
3748 /* Send a FARP response to that node */
3749 if (fp->Rflags & FARP_REQUEST_FARPR)
3750 lpfc_issue_els_farpr(vport, did, 0);
3753 return 0;
3756 static int
3757 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3758 struct lpfc_nodelist *ndlp)
3760 struct lpfc_dmabuf *pcmd;
3761 uint32_t *lp;
3762 IOCB_t *icmd;
3763 uint32_t cmd, did;
3765 icmd = &cmdiocb->iocb;
3766 did = icmd->un.elsreq64.remoteID;
3767 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3768 lp = (uint32_t *) pcmd->virt;
3770 cmd = *lp++;
3771 /* FARP-RSP received from DID <did> */
3772 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3773 "0600 FARP-RSP received from DID x%x\n", did);
3774 /* ACCEPT the Farp resp request */
3775 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3777 return 0;
3780 static int
3781 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3782 struct lpfc_nodelist *fan_ndlp)
3784 struct lpfc_dmabuf *pcmd;
3785 uint32_t *lp;
3786 IOCB_t *icmd;
3787 uint32_t cmd, did;
3788 FAN *fp;
3789 struct lpfc_nodelist *ndlp, *next_ndlp;
3790 struct lpfc_hba *phba = vport->phba;
3792 /* FAN received */
3793 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3794 "0265 FAN received\n");
3795 icmd = &cmdiocb->iocb;
3796 did = icmd->un.elsreq64.remoteID;
3797 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
3798 lp = (uint32_t *)pcmd->virt;
3800 cmd = *lp++;
3801 fp = (FAN *) lp;
3803 /* FAN received; Fan does not have a reply sequence */
3805 if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
3806 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
3807 sizeof(struct lpfc_name)) != 0) ||
3808 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
3809 sizeof(struct lpfc_name)) != 0)) {
3810 /*
3811 * This node has switched fabrics. FLOGI is required.
3812 * Clean up the old rpi's.
3813 */
3815 list_for_each_entry_safe(ndlp, next_ndlp,
3816 &vport->fc_nodes, nlp_listp) {
3817 if (!NLP_CHK_NODE_ACT(ndlp))
3818 continue;
3819 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3820 continue;
3821 if (ndlp->nlp_type & NLP_FABRIC) {
3822 /*
3823 * Clean up old Fabric, Nameserver and
3824 * other NLP_FABRIC logins.
3825 */
3826 lpfc_drop_node(vport, ndlp);
3828 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3829 /* Fail outstanding I/O now since this
3830 * device is marked for PLOGI.
3831 */
3832 lpfc_unreg_rpi(vport, ndlp);
3836 lpfc_initial_flogi(vport);
3837 return 0;
3839 /* Discovery not needed,
3840 * move the nodes to their original state.
3841 */
3842 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
3843 nlp_listp) {
3844 if (!NLP_CHK_NODE_ACT(ndlp))
3845 continue;
3846 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3847 continue;
3849 switch (ndlp->nlp_prev_state) {
3850 case NLP_STE_UNMAPPED_NODE:
3851 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3852 lpfc_nlp_set_state(vport, ndlp,
3853 NLP_STE_UNMAPPED_NODE);
3854 break;
3856 case NLP_STE_MAPPED_NODE:
3857 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3858 lpfc_nlp_set_state(vport, ndlp,
3859 NLP_STE_MAPPED_NODE);
3860 break;
3862 default:
3863 break;
3867 /* Start discovery - this should just do CLEAR_LA */
3868 lpfc_disc_start(vport);
3870 return 0;
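
The FAN path above turns on a single comparison: the fabric named in the FAN payload versus the fabric parameters remembered from the last FLOGI. A minimal sketch of that test, assuming the FAN typedef and the fc_fabparam fields used above (lpfc_fan_fabric_changed() is a hypothetical name, not part of the driver):

static int lpfc_fan_fabric_changed(struct lpfc_hba *phba, FAN *fp)
{
        /* Nonzero when the FAN names a different fabric than the one we
         * logged into, i.e. a fresh FLOGI (and RPI cleanup) is required.
         */
        return memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
                      sizeof(struct lpfc_name)) ||
               memcmp(&phba->fc_fabparam.portName, &fp->FportName,
                      sizeof(struct lpfc_name));
}
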
3873 void
3874 lpfc_els_timeout(unsigned long ptr)
3876 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
3877 struct lpfc_hba *phba = vport->phba;
3878 unsigned long iflag;
3880 spin_lock_irqsave(&vport->work_port_lock, iflag);
3881 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
3882 vport->work_port_events |= WORKER_ELS_TMO;
3883 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3885 spin_lock_irqsave(&phba->hbalock, iflag);
3886 if (phba->work_wait)
3887 lpfc_worker_wake_up(phba);
3888 spin_unlock_irqrestore(&phba->hbalock, iflag);
3890 else
3891 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3892 return;
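
lpfc_els_timeout() only posts WORKER_ELS_TMO and wakes the worker thread; the expensive scan lives in lpfc_els_timeout_handler() below. For that to work, the timer must have been armed with this function and the vport as its argument. A sketch of the 2.6-era wiring, assuming the driver's init code (outside this file) does the equivalent:

/* One-time setup, e.g. when the vport is created (assumed call site) */
setup_timer(&vport->els_tmofunc, lpfc_els_timeout, (unsigned long)vport);

/* Re-arm for another 2 * RATOV window, as the handler below also does */
mod_timer(&vport->els_tmofunc, jiffies + HZ * (phba->fc_ratov << 1));
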
3895 void
3896 lpfc_els_timeout_handler(struct lpfc_vport *vport)
3898 struct lpfc_hba *phba = vport->phba;
3899 struct lpfc_sli_ring *pring;
3900 struct lpfc_iocbq *tmp_iocb, *piocb;
3901 IOCB_t *cmd = NULL;
3902 struct lpfc_dmabuf *pcmd;
3903 uint32_t els_command = 0;
3904 uint32_t timeout;
3905 uint32_t remote_ID = 0xffffffff;
3907 /* If the timer is already canceled do nothing */
3908 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
3909 return;
3911 spin_lock_irq(&phba->hbalock);
3912 timeout = (uint32_t)(phba->fc_ratov << 1);
3914 pring = &phba->sli.ring[LPFC_ELS_RING];
3916 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3917 cmd = &piocb->iocb;
3919 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
3920 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
3921 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
3922 continue;
3924 if (piocb->vport != vport)
3925 continue;
3927 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3928 if (pcmd)
3929 els_command = *(uint32_t *) (pcmd->virt);
3931 if (els_command == ELS_CMD_FARP ||
3932 els_command == ELS_CMD_FARPR ||
3933 els_command == ELS_CMD_FDISC)
3934 continue;
3936 if (vport != piocb->vport)
3937 continue;
3939 if (piocb->drvrTimeout > 0) {
3940 if (piocb->drvrTimeout >= timeout)
3941 piocb->drvrTimeout -= timeout;
3942 else
3943 piocb->drvrTimeout = 0;
3944 continue;
3947 remote_ID = 0xffffffff;
3948 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
3949 remote_ID = cmd->un.elsreq64.remoteID;
3950 else {
3951 struct lpfc_nodelist *ndlp;
3952 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
3953 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3954 remote_ID = ndlp->nlp_DID;
3956 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3957 "0127 ELS timeout Data: x%x x%x x%x "
3958 "x%x\n", els_command,
3959 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
3960 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3962 spin_unlock_irq(&phba->hbalock);
3964 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
3965 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
3968 void
3969 lpfc_els_flush_cmd(struct lpfc_vport *vport)
3971 LIST_HEAD(completions);
3972 struct lpfc_hba *phba = vport->phba;
3973 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3974 struct lpfc_iocbq *tmp_iocb, *piocb;
3975 IOCB_t *cmd = NULL;
3977 lpfc_fabric_abort_vport(vport);
3979 spin_lock_irq(&phba->hbalock);
3980 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
3981 cmd = &piocb->iocb;
3983 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3984 continue;
3987 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
3988 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
3989 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
3990 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3991 cmd->ulpCommand == CMD_ABORT_XRI_CN)
3992 continue;
3994 if (piocb->vport != vport)
3995 continue;
3997 list_move_tail(&piocb->list, &completions);
3998 pring->txq_cnt--;
4001 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
4002 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
4003 continue;
4006 if (piocb->vport != vport)
4007 continue;
4009 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
4011 spin_unlock_irq(&phba->hbalock);
4013 while (!list_empty(&completions)) {
4014 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4015 cmd = &piocb->iocb;
4016 list_del_init(&piocb->list);
4018 if (!piocb->iocb_cmpl)
4019 lpfc_sli_release_iocbq(phba, piocb);
4020 else {
4021 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4022 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4023 (piocb->iocb_cmpl) (phba, piocb, piocb);
4027 return;
4030 void
4031 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
4033 LIST_HEAD(completions);
4034 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4035 struct lpfc_iocbq *tmp_iocb, *piocb;
4036 IOCB_t *cmd = NULL;
4038 lpfc_fabric_abort_hba(phba);
4039 spin_lock_irq(&phba->hbalock);
4040 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
4041 cmd = &piocb->iocb;
4042 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
4043 continue;
4044 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
4045 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
4046 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
4047 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
4048 cmd->ulpCommand == CMD_ABORT_XRI_CN)
4049 continue;
4050 list_move_tail(&piocb->list, &completions);
4051 pring->txq_cnt--;
4053 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
4054 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
4055 continue;
4056 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
4058 spin_unlock_irq(&phba->hbalock);
4059 while (!list_empty(&completions)) {
4060 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4061 cmd = &piocb->iocb;
4062 list_del_init(&piocb->list);
4063 if (!piocb->iocb_cmpl)
4064 lpfc_sli_release_iocbq(phba, piocb);
4065 else {
4066 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4067 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4068 (piocb->iocb_cmpl) (phba, piocb, piocb);
4071 return;
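
lpfc_els_flush_cmd() and lpfc_els_flush_all_cmd() above end with the same loop: every iocb moved off the txq is either released (no completion handler) or completed as locally aborted. A sketch of that shared pattern as a helper, using only calls that appear above (fail_detached_iocbs() is a hypothetical name; the driver open-codes the loop twice):

static void fail_detached_iocbs(struct lpfc_hba *phba,
                                struct list_head *completions)
{
        struct lpfc_iocbq *piocb;
        IOCB_t *cmd;

        while (!list_empty(completions)) {
                piocb = list_get_first(completions, struct lpfc_iocbq, list);
                list_del_init(&piocb->list);
                if (!piocb->iocb_cmpl) {
                        /* Nobody is waiting on this iocb; just return it */
                        lpfc_sli_release_iocbq(phba, piocb);
                } else {
                        /* Complete it as if the HBA had aborted it */
                        cmd = &piocb->iocb;
                        cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        (piocb->iocb_cmpl)(phba, piocb, piocb);
                }
        }
}
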
4074 static void
4075 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4076 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
4078 struct Scsi_Host *shost;
4079 struct lpfc_nodelist *ndlp;
4080 struct ls_rjt stat;
4081 uint32_t *payload;
4082 uint32_t cmd, did, newnode, rjt_err = 0;
4083 IOCB_t *icmd = &elsiocb->iocb;
4085 if (!vport || !(elsiocb->context2))
4086 goto dropit;
4088 newnode = 0;
4089 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
4090 cmd = *payload;
4091 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
4092 lpfc_post_buffer(phba, pring, 1, 1);
4094 did = icmd->un.rcvels.remoteID;
4095 if (icmd->ulpStatus) {
4096 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4097 "RCV Unsol ELS: status:x%x/x%x did:x%x",
4098 icmd->ulpStatus, icmd->un.ulpWord[4], did);
4099 goto dropit;
4102 /* Check to see if link went down during discovery */
4103 if (lpfc_els_chk_latt(vport))
4104 goto dropit;
4106 /* Ignore traffic received during vport shutdown. */
4107 if (vport->load_flag & FC_UNLOADING)
4108 goto dropit;
4110 ndlp = lpfc_findnode_did(vport, did);
4111 if (!ndlp) {
4112 /* Cannot find existing Fabric ndlp, so allocate a new one */
4113 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4114 if (!ndlp)
4115 goto dropit;
4117 lpfc_nlp_init(vport, ndlp, did);
4118 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4119 newnode = 1;
4120 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4121 ndlp->nlp_type |= NLP_FABRIC;
4122 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4123 ndlp = lpfc_enable_node(vport, ndlp,
4124 NLP_STE_UNUSED_NODE);
4125 if (!ndlp)
4126 goto dropit;
4127 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4128 newnode = 1;
4129 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4130 ndlp->nlp_type |= NLP_FABRIC;
4131 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
4132 /* This is similar to the new node path */
4133 ndlp = lpfc_nlp_get(ndlp);
4134 if (!ndlp)
4135 goto dropit;
4136 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4137 newnode = 1;
4140 phba->fc_stat.elsRcvFrame++;
4141 if (elsiocb->context1)
4142 lpfc_nlp_put(elsiocb->context1);
4144 elsiocb->context1 = lpfc_nlp_get(ndlp);
4145 elsiocb->vport = vport;
4147 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
4148 cmd &= ELS_CMD_MASK;
4150 /* ELS command <elsCmd> received from NPORT <did> */
4151 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4152 "0112 ELS command x%x received from NPORT x%x "
4153 "Data: x%x\n", cmd, did, vport->port_state);
4154 switch (cmd) {
4155 case ELS_CMD_PLOGI:
4156 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4157 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
4158 did, vport->port_state, ndlp->nlp_flag);
4160 phba->fc_stat.elsRcvPLOGI++;
4161 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
4163 if (vport->port_state < LPFC_DISC_AUTH) {
4164 if (!(phba->pport->fc_flag & FC_PT2PT) ||
4165 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
4166 rjt_err = LSRJT_UNABLE_TPC;
4167 break;
4169 /* We get here, and drop thru, if we are PT2PT with
4170 * another NPort and the other side has initiated
4171 * the PLOGI before responding to our FLOGI.
4172 */
4175 shost = lpfc_shost_from_vport(vport);
4176 spin_lock_irq(shost->host_lock);
4177 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
4178 spin_unlock_irq(shost->host_lock);
4180 lpfc_disc_state_machine(vport, ndlp, elsiocb,
4181 NLP_EVT_RCV_PLOGI);
4183 break;
4184 case ELS_CMD_FLOGI:
4185 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4186 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
4187 did, vport->port_state, ndlp->nlp_flag);
4189 phba->fc_stat.elsRcvFLOGI++;
4190 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
4191 if (newnode)
4192 lpfc_nlp_put(ndlp);
4193 break;
4194 case ELS_CMD_LOGO:
4195 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4196 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
4197 did, vport->port_state, ndlp->nlp_flag);
4199 phba->fc_stat.elsRcvLOGO++;
4200 if (vport->port_state < LPFC_DISC_AUTH) {
4201 rjt_err = LSRJT_UNABLE_TPC;
4202 break;
4204 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
4205 break;
4206 case ELS_CMD_PRLO:
4207 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4208 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
4209 did, vport->port_state, ndlp->nlp_flag);
4211 phba->fc_stat.elsRcvPRLO++;
4212 if (vport->port_state < LPFC_DISC_AUTH) {
4213 rjt_err = LSRJT_UNABLE_TPC;
4214 break;
4216 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
4217 break;
4218 case ELS_CMD_RSCN:
4219 phba->fc_stat.elsRcvRSCN++;
4220 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
4221 if (newnode)
4222 lpfc_nlp_put(ndlp);
4223 break;
4224 case ELS_CMD_ADISC:
4225 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4226 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
4227 did, vport->port_state, ndlp->nlp_flag);
4229 phba->fc_stat.elsRcvADISC++;
4230 if (vport->port_state < LPFC_DISC_AUTH) {
4231 rjt_err = LSRJT_UNABLE_TPC;
4232 break;
4234 lpfc_disc_state_machine(vport, ndlp, elsiocb,
4235 NLP_EVT_RCV_ADISC);
4236 break;
4237 case ELS_CMD_PDISC:
4238 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4239 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
4240 did, vport->port_state, ndlp->nlp_flag);
4242 phba->fc_stat.elsRcvPDISC++;
4243 if (vport->port_state < LPFC_DISC_AUTH) {
4244 rjt_err = LSRJT_UNABLE_TPC;
4245 break;
4247 lpfc_disc_state_machine(vport, ndlp, elsiocb,
4248 NLP_EVT_RCV_PDISC);
4249 break;
4250 case ELS_CMD_FARPR:
4251 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4252 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
4253 did, vport->port_state, ndlp->nlp_flag);
4255 phba->fc_stat.elsRcvFARPR++;
4256 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
4257 break;
4258 case ELS_CMD_FARP:
4259 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4260 "RCV FARP: did:x%x/ste:x%x flg:x%x",
4261 did, vport->port_state, ndlp->nlp_flag);
4263 phba->fc_stat.elsRcvFARP++;
4264 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
4265 break;
4266 case ELS_CMD_FAN:
4267 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4268 "RCV FAN: did:x%x/ste:x%x flg:x%x",
4269 did, vport->port_state, ndlp->nlp_flag);
4271 phba->fc_stat.elsRcvFAN++;
4272 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
4273 break;
4274 case ELS_CMD_PRLI:
4275 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4276 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
4277 did, vport->port_state, ndlp->nlp_flag);
4279 phba->fc_stat.elsRcvPRLI++;
4280 if (vport->port_state < LPFC_DISC_AUTH) {
4281 rjt_err = LSRJT_UNABLE_TPC;
4282 break;
4284 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
4285 break;
4286 case ELS_CMD_LIRR:
4287 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4288 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
4289 did, vport->port_state, ndlp->nlp_flag);
4291 phba->fc_stat.elsRcvLIRR++;
4292 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
4293 if (newnode)
4294 lpfc_nlp_put(ndlp);
4295 break;
4296 case ELS_CMD_RPS:
4297 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4298 "RCV RPS: did:x%x/ste:x%x flg:x%x",
4299 did, vport->port_state, ndlp->nlp_flag);
4301 phba->fc_stat.elsRcvRPS++;
4302 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
4303 if (newnode)
4304 lpfc_nlp_put(ndlp);
4305 break;
4306 case ELS_CMD_RPL:
4307 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4308 "RCV RPL: did:x%x/ste:x%x flg:x%x",
4309 did, vport->port_state, ndlp->nlp_flag);
4311 phba->fc_stat.elsRcvRPL++;
4312 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
4313 if (newnode)
4314 lpfc_nlp_put(ndlp);
4315 break;
4316 case ELS_CMD_RNID:
4317 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4318 "RCV RNID: did:x%x/ste:x%x flg:x%x",
4319 did, vport->port_state, ndlp->nlp_flag);
4321 phba->fc_stat.elsRcvRNID++;
4322 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
4323 if (newnode)
4324 lpfc_nlp_put(ndlp);
4325 break;
4326 default:
4327 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4328 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
4329 cmd, did, vport->port_state);
4331 /* Unsupported ELS command, reject */
4332 rjt_err = LSRJT_INVALID_CMD;
4334 /* Unknown ELS command <elsCmd> received from NPORT <did> */
4335 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4336 "0115 Unknown ELS command x%x "
4337 "received from NPORT x%x\n", cmd, did);
4338 if (newnode)
4339 lpfc_nlp_put(ndlp);
4340 break;
4343 /* Check if we need to LS_RJT the received ELS cmd */
4344 if (rjt_err) {
4345 memset(&stat, 0, sizeof(stat));
4346 stat.un.b.lsRjtRsnCode = rjt_err;
4347 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
4348 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
4349 NULL);
4352 return;
4354 dropit:
4355 if (vport && !(vport->load_flag & FC_UNLOADING))
4356 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4357 "(%d):0111 Dropping received ELS cmd "
4358 "Data: x%x x%x x%x\n",
4359 vport->vpi, icmd->ulpStatus,
4360 icmd->un.ulpWord[4], icmd->ulpTimeout);
4361 phba->fc_stat.elsRcvDrop++;
4364 static struct lpfc_vport *
4365 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
4367 struct lpfc_vport *vport;
4368 unsigned long flags;
4370 spin_lock_irqsave(&phba->hbalock, flags);
4371 list_for_each_entry(vport, &phba->port_list, listentry) {
4372 if (vport->vpi == vpi) {
4373 spin_unlock_irqrestore(&phba->hbalock, flags);
4374 return vport;
4377 spin_unlock_irqrestore(&phba->hbalock, flags);
4378 return NULL;
4381 void
4382 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4383 struct lpfc_iocbq *elsiocb)
4385 struct lpfc_vport *vport = phba->pport;
4386 IOCB_t *icmd = &elsiocb->iocb;
4387 dma_addr_t paddr;
4388 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
4389 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
4391 elsiocb->context2 = NULL;
4392 elsiocb->context3 = NULL;
4394 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
4395 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
4396 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
4397 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
4398 phba->fc_stat.NoRcvBuf++;
4399 /* Not enough posted buffers; Try posting more buffers */
4400 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
4401 lpfc_post_buffer(phba, pring, 0, 1);
4402 return;
4405 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4406 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
4407 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
4408 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
4409 vport = phba->pport;
4410 else {
4411 uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
4412 vport = lpfc_find_vport_by_vpid(phba, vpi);
4415 /* If there are no BDEs associated
4416 * with this IOCB, there is nothing to do.
4417 */
4418 if (icmd->ulpBdeCount == 0)
4419 return;
4421 /* type of ELS cmd is first 32bit word
4422 * in packet
4423 */
4424 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4425 elsiocb->context2 = bdeBuf1;
4426 } else {
4427 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
4428 icmd->un.cont64[0].addrLow);
4429 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
4430 paddr);
4433 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
4434 /*
4435 * The different unsolicited event handlers would tell us
4436 * if they are done with "mp" by setting context2 to NULL.
4437 */
4438 lpfc_nlp_put(elsiocb->context1);
4439 elsiocb->context1 = NULL;
4440 if (elsiocb->context2) {
4441 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
4442 elsiocb->context2 = NULL;
4445 /* RCV_ELS64_CX provides for 2 BDEs - process the 2nd if included */
4446 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
4447 icmd->ulpBdeCount == 2) {
4448 elsiocb->context2 = bdeBuf2;
4449 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
4450 /* free mp if we are done with it */
4451 if (elsiocb->context2) {
4452 lpfc_in_buf_free(phba, elsiocb->context2);
4453 elsiocb->context2 = NULL;
4458 void
4459 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
4461 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
4463 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4464 if (!ndlp) {
4465 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4466 if (!ndlp) {
4467 if (phba->fc_topology == TOPOLOGY_LOOP) {
4468 lpfc_disc_start(vport);
4469 return;
4471 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4472 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4473 "0251 NameServer login: no memory\n");
4474 return;
4476 lpfc_nlp_init(vport, ndlp, NameServer_DID);
4477 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4478 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4479 if (!ndlp) {
4480 if (phba->fc_topology == TOPOLOGY_LOOP) {
4481 lpfc_disc_start(vport);
4482 return;
4484 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4485 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4486 "0348 NameServer login: node freed\n");
4487 return;
4490 ndlp->nlp_type |= NLP_FABRIC;
4492 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4494 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
4495 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4496 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4497 "0252 Cannot issue NameServer login\n");
4498 return;
4501 if (vport->cfg_fdmi_on) {
4502 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
4503 GFP_KERNEL);
4504 if (ndlp_fdmi) {
4505 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
4506 ndlp_fdmi->nlp_type |= NLP_FABRIC;
4507 lpfc_nlp_set_state(vport, ndlp_fdmi,
4508 NLP_STE_PLOGI_ISSUE);
4509 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
4510 0);
4513 return;
4516 static void
4517 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4519 struct lpfc_vport *vport = pmb->vport;
4520 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4521 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
4522 MAILBOX_t *mb = &pmb->mb;
4524 spin_lock_irq(shost->host_lock);
4525 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4526 spin_unlock_irq(shost->host_lock);
4528 if (mb->mbxStatus) {
4529 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4530 "0915 Register VPI failed: 0x%x\n",
4531 mb->mbxStatus);
4533 switch (mb->mbxStatus) {
4534 case 0x11: /* unsupported feature */
4535 case 0x9603: /* max_vpi exceeded */
4536 case 0x9602: /* Link event since CLEAR_LA */
4537 /* giving up on vport registration */
4538 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4539 spin_lock_irq(shost->host_lock);
4540 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4541 spin_unlock_irq(shost->host_lock);
4542 lpfc_can_disctmo(vport);
4543 break;
4544 default:
4545 /* Try to recover from this error */
4546 lpfc_mbx_unreg_vpi(vport);
4547 spin_lock_irq(shost->host_lock);
4548 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4549 spin_unlock_irq(shost->host_lock);
4550 if (vport->port_type == LPFC_PHYSICAL_PORT)
4551 lpfc_initial_flogi(vport);
4552 else
4553 lpfc_initial_fdisc(vport);
4554 break;
4557 } else {
4558 if (vport == phba->pport)
4559 lpfc_issue_fabric_reglogin(vport);
4560 else
4561 lpfc_do_scr_ns_plogi(phba, vport);
4564 /* Now, we decrement the ndlp reference count held for this
4565 * callback function
4566 */
4567 lpfc_nlp_put(ndlp);
4569 mempool_free(pmb, phba->mbox_mem_pool);
4570 return;
4573 static void
4574 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
4575 struct lpfc_nodelist *ndlp)
4577 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4578 LPFC_MBOXQ_t *mbox;
4580 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4581 if (mbox) {
4582 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
4583 mbox->vport = vport;
4584 mbox->context2 = lpfc_nlp_get(ndlp);
4585 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
4586 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4587 == MBX_NOT_FINISHED) {
4588 /* The mailbox command failed; drop the ndlp
4589 * reference count taken for this command
4590 */
4591 lpfc_nlp_put(ndlp);
4592 mempool_free(mbox, phba->mbox_mem_pool);
4594 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4595 "0253 Register VPI: Can't send mbox\n");
4596 goto mbox_err_exit;
4598 } else {
4599 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4600 "0254 Register VPI: no memory\n");
4601 goto mbox_err_exit;
4603 return;
4605 mbox_err_exit:
4606 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4607 spin_lock_irq(shost->host_lock);
4608 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4609 spin_unlock_irq(shost->host_lock);
4610 return;
4613 static void
4614 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4615 struct lpfc_iocbq *rspiocb)
4617 struct lpfc_vport *vport = cmdiocb->vport;
4618 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4619 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4620 struct lpfc_nodelist *np;
4621 struct lpfc_nodelist *next_np;
4622 IOCB_t *irsp = &rspiocb->iocb;
4623 struct lpfc_iocbq *piocb;
4625 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4626 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
4627 irsp->ulpStatus, irsp->un.ulpWord[4],
4628 vport->fc_prevDID);
4629 /* Since all FDISCs are being single threaded, we
4630 * must reset the discovery timer for ALL vports
4631 * waiting to send FDISC when one completes.
4632 */
4633 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
4634 lpfc_set_disctmo(piocb->vport);
4637 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4638 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
4639 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
4641 if (irsp->ulpStatus) {
4642 /* Check for retry */
4643 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
4644 goto out;
4645 /* FDISC failed */
4646 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4647 "0124 FDISC failed. (%d/%d)\n",
4648 irsp->ulpStatus, irsp->un.ulpWord[4]);
4649 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
4650 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4651 lpfc_nlp_put(ndlp);
4652 /* giving up on FDISC. Cancel discovery timer */
4653 lpfc_can_disctmo(vport);
4654 } else {
4655 spin_lock_irq(shost->host_lock);
4656 vport->fc_flag |= FC_FABRIC;
4657 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
4658 vport->fc_flag |= FC_PUBLIC_LOOP;
4659 spin_unlock_irq(shost->host_lock);
4661 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
4662 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
4663 if ((vport->fc_prevDID != vport->fc_myDID) &&
4664 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
4665 /* If our NportID changed, we need to ensure all
4666 * remaining NPORTs get unreg_login'ed so we can
4667 * issue unreg_vpi.
4668 */
4669 list_for_each_entry_safe(np, next_np,
4670 &vport->fc_nodes, nlp_listp) {
4671 if (!NLP_CHK_NODE_ACT(np) ||
4672 (np->nlp_state != NLP_STE_NPR_NODE) ||
4673 !(np->nlp_flag & NLP_NPR_ADISC))
4674 continue;
4675 spin_lock_irq(shost->host_lock);
4676 np->nlp_flag &= ~NLP_NPR_ADISC;
4677 spin_unlock_irq(shost->host_lock);
4678 lpfc_unreg_rpi(vport, np);
4680 lpfc_mbx_unreg_vpi(vport);
4681 spin_lock_irq(shost->host_lock);
4682 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4683 spin_unlock_irq(shost->host_lock);
4686 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
4687 lpfc_register_new_vport(phba, vport, ndlp);
4688 else
4689 lpfc_do_scr_ns_plogi(phba, vport);
4691 /* Unconditionally kick off releasing the fabric node for vports */
4692 lpfc_nlp_put(ndlp);
4695 out:
4696 lpfc_els_free_iocb(phba, cmdiocb);
4699 static int
4700 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4701 uint8_t retry)
4703 struct lpfc_hba *phba = vport->phba;
4704 IOCB_t *icmd;
4705 struct lpfc_iocbq *elsiocb;
4706 struct serv_parm *sp;
4707 uint8_t *pcmd;
4708 uint16_t cmdsize;
4709 int did = ndlp->nlp_DID;
4710 int rc;
4712 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
4713 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
4714 ELS_CMD_FDISC);
4715 if (!elsiocb) {
4716 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4717 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4718 "0255 Issue FDISC: no IOCB\n");
4719 return 1;
4722 icmd = &elsiocb->iocb;
4723 icmd->un.elsreq64.myID = 0;
4724 icmd->un.elsreq64.fl = 1;
4726 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
4727 icmd->ulpCt_h = 1;
4728 icmd->ulpCt_l = 0;
4730 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4731 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
4732 pcmd += sizeof(uint32_t); /* CSP Word 1 */
4733 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
4734 sp = (struct serv_parm *) pcmd;
4735 /* Setup CSPs accordingly for Fabric */
4736 sp->cmn.e_d_tov = 0;
4737 sp->cmn.w2.r_a_tov = 0;
4738 sp->cls1.classValid = 0;
4739 sp->cls2.seqDelivery = 1;
4740 sp->cls3.seqDelivery = 1;
4742 pcmd += sizeof(uint32_t); /* CSP Word 2 */
4743 pcmd += sizeof(uint32_t); /* CSP Word 3 */
4744 pcmd += sizeof(uint32_t); /* CSP Word 4 */
4745 pcmd += sizeof(uint32_t); /* Port Name */
4746 memcpy(pcmd, &vport->fc_portname, 8);
4747 pcmd += sizeof(uint32_t); /* Node Name */
4748 pcmd += sizeof(uint32_t); /* Node Name */
4749 memcpy(pcmd, &vport->fc_nodename, 8);
4751 lpfc_set_disctmo(vport);
4753 phba->fc_stat.elsXmitFDISC++;
4754 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
4756 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4757 "Issue FDISC: did:x%x",
4758 did, 0, 0);
4760 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
4761 if (rc == IOCB_ERROR) {
4762 lpfc_els_free_iocb(phba, elsiocb);
4763 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4764 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4765 "0256 Issue FDISC: Cannot send IOCB\n");
4766 return 1;
4768 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
4769 vport->port_state = LPFC_FDISC;
4770 return 0;
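
The pcmd arithmetic above lays out the FDISC payload as the 4-byte command word followed by struct serv_parm: four common service parameter words, then the 8-byte port name and the 8-byte node name. The same writes expressed through the structure fields, assuming the serv_parm member names from lpfc_hw.h (a readability sketch, not a proposed change to the driver):

uint8_t *base = ((struct lpfc_dmabuf *) elsiocb->context2)->virt;
struct serv_parm *fdisc_sp = (struct serv_parm *)(base + sizeof(uint32_t));

*((uint32_t *) base) = ELS_CMD_FDISC;
memcpy(fdisc_sp, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
/* CSP adjustments for a fabric login, as above */
fdisc_sp->cmn.e_d_tov = 0;
fdisc_sp->cmn.w2.r_a_tov = 0;
fdisc_sp->cls1.classValid = 0;
fdisc_sp->cls2.seqDelivery = 1;
fdisc_sp->cls3.seqDelivery = 1;
/* Port name at offset 16 of serv_parm, node name at offset 24 */
memcpy(&fdisc_sp->portName, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(&fdisc_sp->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
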
4773 static void
4774 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4775 struct lpfc_iocbq *rspiocb)
4777 struct lpfc_vport *vport = cmdiocb->vport;
4778 IOCB_t *irsp;
4779 struct lpfc_nodelist *ndlp;
4780 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
4782 irsp = &rspiocb->iocb;
4783 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4784 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
4785 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
4787 lpfc_els_free_iocb(phba, cmdiocb);
4788 vport->unreg_vpi_cmpl = VPORT_ERROR;
4790 /* Trigger the release of the ndlp after logo */
4791 lpfc_nlp_put(ndlp);
4794 int
4795 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4797 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4798 struct lpfc_hba *phba = vport->phba;
4799 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4800 IOCB_t *icmd;
4801 struct lpfc_iocbq *elsiocb;
4802 uint8_t *pcmd;
4803 uint16_t cmdsize;
4805 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
4806 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
4807 ELS_CMD_LOGO);
4808 if (!elsiocb)
4809 return 1;
4811 icmd = &elsiocb->iocb;
4812 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4813 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
4814 pcmd += sizeof(uint32_t);
4816 /* Fill in LOGO payload */
4817 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
4818 pcmd += sizeof(uint32_t);
4819 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
4821 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4822 "Issue LOGO npiv did:x%x flg:x%x",
4823 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4825 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
4826 spin_lock_irq(shost->host_lock);
4827 ndlp->nlp_flag |= NLP_LOGO_SND;
4828 spin_unlock_irq(shost->host_lock);
4829 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
4830 spin_lock_irq(shost->host_lock);
4831 ndlp->nlp_flag &= ~NLP_LOGO_SND;
4832 spin_unlock_irq(shost->host_lock);
4833 lpfc_els_free_iocb(phba, elsiocb);
4834 return 1;
4836 return 0;
4839 void
4840 lpfc_fabric_block_timeout(unsigned long ptr)
4842 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4843 unsigned long iflags;
4844 uint32_t tmo_posted;
4845 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
4846 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
4847 if (!tmo_posted)
4848 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
4849 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
4851 if (!tmo_posted) {
4852 spin_lock_irqsave(&phba->hbalock, iflags);
4853 if (phba->work_wait)
4854 lpfc_worker_wake_up(phba);
4855 spin_unlock_irqrestore(&phba->hbalock, iflags);
4859 static void
4860 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
4862 struct lpfc_iocbq *iocb;
4863 unsigned long iflags;
4864 int ret;
4865 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4866 IOCB_t *cmd;
4868 repeat:
4869 iocb = NULL;
4870 spin_lock_irqsave(&phba->hbalock, iflags);
4871 /* Post any pending iocb to the SLI layer */
4872 if (atomic_read(&phba->fabric_iocb_count) == 0) {
4873 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
4874 list);
4875 if (iocb)
4876 /* Increment fabric iocb count to hold the position */
4877 atomic_inc(&phba->fabric_iocb_count);
4879 spin_unlock_irqrestore(&phba->hbalock, iflags);
4880 if (iocb) {
4881 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4882 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4883 iocb->iocb_flag |= LPFC_IO_FABRIC;
4885 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
4886 "Fabric sched1: ste:x%x",
4887 iocb->vport->port_state, 0, 0);
4889 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4891 if (ret == IOCB_ERROR) {
4892 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4893 iocb->fabric_iocb_cmpl = NULL;
4894 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4895 cmd = &iocb->iocb;
4896 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4897 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4898 iocb->iocb_cmpl(phba, iocb, iocb);
4900 atomic_dec(&phba->fabric_iocb_count);
4901 goto repeat;
4905 return;
4908 void
4909 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
4911 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4913 lpfc_resume_fabric_iocbs(phba);
4914 return;
4917 static void
4918 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
4920 int blocked;
4922 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4923 /* Start a timer to unblock fabric iocbs after 100ms */
4924 if (!blocked)
4925 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
4927 return;
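
Blocking fabric iocbs is just a flag plus a one-shot timer: lpfc_block_fabric_iocbs() sets FABRIC_COMANDS_BLOCKED and arms fabric_block_timer for roughly 100ms; when it fires, lpfc_fabric_block_timeout() above posts WORKER_FABRIC_BLOCK_TMO, and the worker thread is expected to call lpfc_unblock_fabric_iocbs(), which clears the bit and reissues anything queued. A sketch of the two pieces that live outside this file (both call sites are assumptions, not quoted from the driver):

/* One-time timer wiring at HBA setup (assumed call site) */
setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
            (unsigned long)phba);

/* Worker-thread side (sketch): react to the posted event */
if (phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO)
        lpfc_unblock_fabric_iocbs(phba);
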
4930 static void
4931 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4932 struct lpfc_iocbq *rspiocb)
4934 struct ls_rjt stat;
4936 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
4937 BUG();
4939 switch (rspiocb->iocb.ulpStatus) {
4940 case IOSTAT_NPORT_RJT:
4941 case IOSTAT_FABRIC_RJT:
4942 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
4943 lpfc_block_fabric_iocbs(phba);
4945 break;
4947 case IOSTAT_NPORT_BSY:
4948 case IOSTAT_FABRIC_BSY:
4949 lpfc_block_fabric_iocbs(phba);
4950 break;
4952 case IOSTAT_LS_RJT:
4953 stat.un.lsRjtError =
4954 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
4955 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
4956 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
4957 lpfc_block_fabric_iocbs(phba);
4958 break;
4961 if (atomic_read(&phba->fabric_iocb_count) == 0)
4962 BUG();
4964 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
4965 cmdiocb->fabric_iocb_cmpl = NULL;
4966 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
4967 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
4969 atomic_dec(&phba->fabric_iocb_count);
4970 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
4971 /* Post any pending iocbs to HBA */
4972 lpfc_resume_fabric_iocbs(phba);
4976 static int
4977 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4979 unsigned long iflags;
4980 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4981 int ready;
4982 int ret;
4984 if (atomic_read(&phba->fabric_iocb_count) > 1)
4985 BUG();
4987 spin_lock_irqsave(&phba->hbalock, iflags);
4988 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
4989 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4991 if (ready)
4992 /* Increment fabric iocb count to hold the position */
4993 atomic_inc(&phba->fabric_iocb_count);
4994 spin_unlock_irqrestore(&phba->hbalock, iflags);
4995 if (ready) {
4996 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4997 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4998 iocb->iocb_flag |= LPFC_IO_FABRIC;
5000 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
5001 "Fabric sched2: ste:x%x",
5002 iocb->vport->port_state, 0, 0);
5004 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
5006 if (ret == IOCB_ERROR) {
5007 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
5008 iocb->fabric_iocb_cmpl = NULL;
5009 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
5010 atomic_dec(&phba->fabric_iocb_count);
5012 } else {
5013 spin_lock_irqsave(&phba->hbalock, iflags);
5014 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
5015 spin_unlock_irqrestore(&phba->hbalock, iflags);
5016 ret = IOCB_SUCCESS;
5018 return ret;
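
Note the contract lpfc_issue_fabric_iocb() gives its callers: a queued iocb and an immediately issued one both come back as success, so callers treat the two cases identically and only clean up on IOCB_ERROR. That is exactly how lpfc_issue_els_fdisc() above uses it; a condensed sketch of the caller side:

rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
        /* Never queued or issued: release the iocb and fail the caller */
        lpfc_els_free_iocb(phba, elsiocb);
        return 1;
}
/* Either on the wire or parked on fabric_iocb_list awaiting its turn */
return 0;
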
5022 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
5024 LIST_HEAD(completions);
5025 struct lpfc_hba *phba = vport->phba;
5026 struct lpfc_iocbq *tmp_iocb, *piocb;
5027 IOCB_t *cmd;
5029 spin_lock_irq(&phba->hbalock);
5030 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
5031 list) {
5033 if (piocb->vport != vport)
5034 continue;
5036 list_move_tail(&piocb->list, &completions);
5038 spin_unlock_irq(&phba->hbalock);
5040 while (!list_empty(&completions)) {
5041 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
5042 list_del_init(&piocb->list);
5044 cmd = &piocb->iocb;
5045 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5046 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5047 (piocb->iocb_cmpl) (phba, piocb, piocb);
5051 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
5053 LIST_HEAD(completions);
5054 struct lpfc_hba *phba = ndlp->vport->phba;
5055 struct lpfc_iocbq *tmp_iocb, *piocb;
5056 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5057 IOCB_t *cmd;
5059 spin_lock_irq(&phba->hbalock);
5060 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
5061 list) {
5062 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
5064 list_move_tail(&piocb->list, &completions);
5067 spin_unlock_irq(&phba->hbalock);
5069 while (!list_empty(&completions)) {
5070 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
5071 list_del_init(&piocb->list);
5073 cmd = &piocb->iocb;
5074 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5075 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5076 (piocb->iocb_cmpl) (phba, piocb, piocb);
5080 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
5082 LIST_HEAD(completions);
5083 struct lpfc_iocbq *piocb;
5084 IOCB_t *cmd;
5086 spin_lock_irq(&phba->hbalock);
5087 list_splice_init(&phba->fabric_iocb_list, &completions);
5088 spin_unlock_irq(&phba->hbalock);
5090 while (!list_empty(&completions)) {
5091 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
5092 list_del_init(&piocb->list);
5094 cmd = &piocb->iocb;
5095 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5096 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5097 (piocb->iocb_cmpl) (phba, piocb, piocb);