drivers/scsi/lpfc/lpfc_nportdisc.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
22 * $Id: lpfc_nportdisc.c 1.179 2005/04/13 11:59:13EDT sf_support Exp $
25 #include <linux/blkdev.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_logmsg.h"
39 #include "lpfc_crtn.h"
42 /* Called to verify a rcv'ed ADISC was intended for us. */
43 static int
44 lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
45 struct lpfc_name * nn, struct lpfc_name * pn)
47 /* Check that the ADISC rsp WWNN / WWPN matches our internal node
48 * table entry for that node.
50 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
51 return (0);
53 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
54 return (0);
56 /* we match, return success */
57 return (1);
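/*
 * Validate the service parameters from a login payload against our own
 * (phba->fc_sparam).  Receive data field sizes are clamped to what we
 * support.  On success the remote WWNN / WWPN are copied into the node
 * entry and 1 is returned; 0 is returned if the requested class of
 * service is not supported.
 */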
61 int
62 lpfc_check_sparm(struct lpfc_hba * phba,
63 struct lpfc_nodelist * ndlp, struct serv_parm * sp,
64 uint32_t class)
66 volatile struct serv_parm *hsp = &phba->fc_sparam;
67 /* First check for supported version */
69 /* Next check for class validity */
70 if (sp->cls1.classValid) {
72 if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb)
73 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
74 if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb)
75 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
76 } else if (class == CLASS1) {
77 return (0);
80 if (sp->cls2.classValid) {
82 if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb)
83 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
84 if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb)
85 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
86 } else if (class == CLASS2) {
87 return (0);
90 if (sp->cls3.classValid) {
92 if (sp->cls3.rcvDataSizeMsb > hsp->cls3.rcvDataSizeMsb)
93 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
94 if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb)
95 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
96 } else if (class == CLASS3) {
97 return (0);
100 if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb)
101 sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb;
102 if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb)
103 sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
105 /* If check is good, copy wwpn wwnn into ndlp */
106 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
107 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
108 return (1);
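/*
 * Return a pointer to the payload of the ELS response that completed
 * cmdiocb.  If the command buffer was already released (context2 cleared
 * by lpfc_els_abort), force a LOCAL_REJECT / SLI_ABORTED status and
 * return NULL.
 */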
111 static void *
112 lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
113 struct lpfc_iocbq *cmdiocb,
114 struct lpfc_iocbq *rspiocb)
116 struct lpfc_dmabuf *pcmd, *prsp;
117 uint32_t *lp;
118 void *ptr = NULL;
119 IOCB_t *irsp;
121 irsp = &rspiocb->iocb;
122 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
124 /* For lpfc_els_abort, context2 could be zero'ed to delay
125 * freeing associated memory till after ABTS completes.
127 if (pcmd) {
128 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
129 list);
130 if (prsp) {
131 lp = (uint32_t *) prsp->virt;
132 ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
135 else {
136 /* Force ulpStatus error since we are returning NULL ptr */
137 if (!(irsp->ulpStatus)) {
138 irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
139 irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
141 ptr = NULL;
143 return (ptr);
148 * Free resources / clean up outstanding I/Os
149 * associated with a LPFC_NODELIST entry. This
150 * routine effectively results in a "software abort".
153 lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
154 int send_abts)
156 struct lpfc_sli *psli;
157 struct lpfc_sli_ring *pring;
158 struct lpfc_iocbq *iocb, *next_iocb;
159 IOCB_t *icmd;
160 int found = 0;
162 /* Abort outstanding I/O on NPort <nlp_DID> */
163 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
164 "%d:0201 Abort outstanding I/O on NPort x%x "
165 "Data: x%x x%x x%x\n",
166 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
167 ndlp->nlp_state, ndlp->nlp_rpi);
169 psli = &phba->sli;
170 pring = &psli->ring[LPFC_ELS_RING];
172 /* First check the txq */
173 do {
174 found = 0;
175 spin_lock_irq(phba->host->host_lock);
176 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
177 /* Check to see if iocb matches the nport we are looking
178 for */
179 if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
180 found = 1;
 181                         /* It matches, so dequeue it and call compl with an
182 error */
183 list_del(&iocb->list);
184 pring->txq_cnt--;
185 if (iocb->iocb_cmpl) {
186 icmd = &iocb->iocb;
187 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
188 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
189 spin_unlock_irq(phba->host->host_lock);
190 (iocb->iocb_cmpl) (phba, iocb, iocb);
191 spin_lock_irq(phba->host->host_lock);
192 } else {
193 list_add_tail(&iocb->list,
194 &phba->lpfc_iocb_list);
196 break;
199 spin_unlock_irq(phba->host->host_lock);
200 } while (found);
202 /* Everything on txcmplq will be returned by firmware
203 * with a no rpi / linkdown / abort error. For ring 0,
204 * ELS discovery, we want to get rid of it right here.
206 /* Next check the txcmplq */
207 do {
208 found = 0;
209 spin_lock_irq(phba->host->host_lock);
210 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
211 list) {
212 /* Check to see if iocb matches the nport we are looking
213 for */
214 if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) {
215 found = 1;
 216                         /* It matches, so dequeue it and call compl with an
217 error */
218 list_del(&iocb->list);
219 pring->txcmplq_cnt--;
221 icmd = &iocb->iocb;
222 /* If the driver is completing an ELS
223 * command early, flush it out of the firmware.
225 if (send_abts &&
226 (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
227 (icmd->un.elsreq64.bdl.ulpIoTag32)) {
228 lpfc_sli_issue_abort_iotag32(phba,
229 pring, iocb);
231 if (iocb->iocb_cmpl) {
232 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
233 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
234 spin_unlock_irq(phba->host->host_lock);
235 (iocb->iocb_cmpl) (phba, iocb, iocb);
236 spin_lock_irq(phba->host->host_lock);
237 } else {
238 list_add_tail(&iocb->list,
239 &phba->lpfc_iocb_list);
241 break;
244 spin_unlock_irq(phba->host->host_lock);
245 } while(found);
247 /* If we are delaying issuing an ELS command, cancel it */
248 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
249 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
250 del_timer_sync(&ndlp->nlp_delayfunc);
251 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
252 list_del_init(&ndlp->els_retry_evt.evt_listp);
254 return (0);
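/*
 * Handle an incoming PLOGI.  If FLOGI has not yet completed, point-to-point
 * mode is resolved first (any outstanding FLOGI is aborted).  The service
 * parameters are then validated and a REG_LOGIN mailbox is queued to run
 * after the ACC.  Returns 1 if the PLOGI is accepted, 0 if it is rejected
 * with an LS_RJT.
 */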
257 static int
258 lpfc_rcv_plogi(struct lpfc_hba * phba,
259 struct lpfc_nodelist * ndlp,
260 struct lpfc_iocbq *cmdiocb)
262 struct lpfc_dmabuf *pcmd;
263 uint32_t *lp;
264 IOCB_t *icmd;
265 struct serv_parm *sp;
266 LPFC_MBOXQ_t *mbox;
267 struct ls_rjt stat;
268 int rc;
270 memset(&stat, 0, sizeof (struct ls_rjt));
271 if (phba->hba_state <= LPFC_FLOGI) {
272 /* Before responding to PLOGI, check for pt2pt mode.
273 * If we are pt2pt, with an outstanding FLOGI, abort
274 * the FLOGI and resend it first.
276 if (phba->fc_flag & FC_PT2PT) {
277 lpfc_els_abort_flogi(phba);
278 if (!(phba->fc_flag & FC_PT2PT_PLOGI)) {
279 /* If the other side is supposed to initiate
280 * the PLOGI anyway, just ACC it now and
281 * move on with discovery.
283 phba->fc_edtov = FF_DEF_EDTOV;
284 phba->fc_ratov = FF_DEF_RATOV;
285 /* Start discovery - this should just do
286 CLEAR_LA */
287 lpfc_disc_start(phba);
289 else {
290 lpfc_initial_flogi(phba);
293 else {
294 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
295 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
296 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb,
297 ndlp);
298 return 0;
301 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
302 lp = (uint32_t *) pcmd->virt;
303 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
304 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
 305                 /* Reject this request because of invalid parameters */
306 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
307 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
308 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
309 return (0);
311 icmd = &cmdiocb->iocb;
313 /* PLOGI chkparm OK */
314 lpfc_printf_log(phba,
315 KERN_INFO,
316 LOG_ELS,
317 "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
318 phba->brd_no,
319 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
320 ndlp->nlp_rpi);
322 if ((phba->cfg_fcp_class == 2) &&
323 (sp->cls2.classValid)) {
324 ndlp->nlp_fcp_info |= CLASS2;
325 } else {
326 ndlp->nlp_fcp_info |= CLASS3;
328 ndlp->nlp_class_sup = 0;
329 if (sp->cls1.classValid)
330 ndlp->nlp_class_sup |= FC_COS_CLASS1;
331 if (sp->cls2.classValid)
332 ndlp->nlp_class_sup |= FC_COS_CLASS2;
333 if (sp->cls3.classValid)
334 ndlp->nlp_class_sup |= FC_COS_CLASS3;
335 if (sp->cls4.classValid)
336 ndlp->nlp_class_sup |= FC_COS_CLASS4;
337 ndlp->nlp_maxframe =
338 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
340 /* no need to reg_login if we are already in one of these states */
341 switch(ndlp->nlp_state) {
342 case NLP_STE_NPR_NODE:
343 if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
344 break;
345 case NLP_STE_REG_LOGIN_ISSUE:
346 case NLP_STE_PRLI_ISSUE:
347 case NLP_STE_UNMAPPED_NODE:
348 case NLP_STE_MAPPED_NODE:
349 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
350 return (1);
353 if ((phba->fc_flag & FC_PT2PT)
354 && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
355 /* rcv'ed PLOGI decides what our NPortId will be */
356 phba->fc_myDID = icmd->un.rcvels.parmRo;
357 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
358 if (mbox == NULL)
359 goto out;
360 lpfc_config_link(phba, mbox);
361 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
362 rc = lpfc_sli_issue_mbox
363 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
364 if (rc == MBX_NOT_FINISHED) {
365 mempool_free( mbox, phba->mbox_mem_pool);
366 goto out;
369 lpfc_can_disctmo(phba);
371 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
372 if (mbox == NULL)
373 goto out;
375 if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
376 (uint8_t *) sp, mbox, 0)) {
377 mempool_free( mbox, phba->mbox_mem_pool);
378 goto out;
381 /* ACC PLOGI rsp command needs to execute first,
382 * queue this mbox command to be processed later.
384 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
385 mbox->context2 = ndlp;
386 ndlp->nlp_flag |= NLP_ACC_REGLOGIN;
388 /* If there is an outstanding PLOGI issued, abort it before
 389          * sending the ACC rsp to the PLOGI received.
391 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
392 /* software abort outstanding PLOGI */
393 lpfc_els_abort(phba, ndlp, 1);
395 ndlp->nlp_flag |= NLP_RCV_PLOGI;
396 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
397 return (1);
399 out:
400 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
401 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
402 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
403 return (0);
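/*
 * Handle an incoming ADISC or PDISC.  If the embedded WWNN / WWPN still
 * match our node table entry the request is ACC'd; otherwise it is
 * rejected and the node is parked on the NPR list with a 1 second delayed
 * PLOGI retry.
 */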
406 static int
407 lpfc_rcv_padisc(struct lpfc_hba * phba,
408 struct lpfc_nodelist * ndlp,
409 struct lpfc_iocbq *cmdiocb)
411 struct lpfc_dmabuf *pcmd;
412 struct serv_parm *sp;
413 struct lpfc_name *pnn, *ppn;
414 struct ls_rjt stat;
415 ADISC *ap;
416 IOCB_t *icmd;
417 uint32_t *lp;
418 uint32_t cmd;
420 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
421 lp = (uint32_t *) pcmd->virt;
423 cmd = *lp++;
424 if (cmd == ELS_CMD_ADISC) {
425 ap = (ADISC *) lp;
426 pnn = (struct lpfc_name *) & ap->nodeName;
427 ppn = (struct lpfc_name *) & ap->portName;
428 } else {
429 sp = (struct serv_parm *) lp;
430 pnn = (struct lpfc_name *) & sp->nodeName;
431 ppn = (struct lpfc_name *) & sp->portName;
434 icmd = &cmdiocb->iocb;
435 if ((icmd->ulpStatus == 0) &&
436 (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
437 if (cmd == ELS_CMD_ADISC) {
438 lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
440 else {
441 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
442 NULL, 0);
444 return (1);
 446         /* Reject this request because of invalid parameters */
447 stat.un.b.lsRjtRsvd0 = 0;
448 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
449 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
450 stat.un.b.vendorUnique = 0;
451 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
453 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
454 /* 1 sec timeout */
455 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
457 spin_lock_irq(phba->host->host_lock);
458 ndlp->nlp_flag |= NLP_DELAY_TMO;
459 spin_unlock_irq(phba->host->host_lock);
460 ndlp->nlp_state = NLP_STE_NPR_NODE;
461 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
462 return (0);
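/*
 * Handle an incoming LOGO (PRLO is treated the same way): ACC it, and
 * unless the node is a fabric nport schedule a PLOGI retry in 1 second,
 * then park the node on the NPR list.
 */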
465 static int
466 lpfc_rcv_logo(struct lpfc_hba * phba,
467 struct lpfc_nodelist * ndlp,
468 struct lpfc_iocbq *cmdiocb)
470 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
471 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
472 * PLOGIs during LOGO storms from a device.
474 ndlp->nlp_flag |= NLP_LOGO_ACC;
475 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
477 if (!(ndlp->nlp_type & NLP_FABRIC)) {
478 /* Only try to re-login if this is NOT a Fabric Node */
479 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
480 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
481 spin_lock_irq(phba->host->host_lock);
482 ndlp->nlp_flag |= NLP_DELAY_TMO;
483 spin_unlock_irq(phba->host->host_lock);
486 ndlp->nlp_state = NLP_STE_NPR_NODE;
487 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
489 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
490 /* The driver has to wait until the ACC completes before it continues
491 * processing the LOGO. The action will resume in
492 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
493 * unreg_login, the driver waits so the ACC does not get aborted.
495 return (0);
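/*
 * Parse an incoming PRLI payload and update the node's FCP initiator /
 * target and FCP-2 capability bits; if an rport is already registered,
 * propagate the new roles to the FC transport.
 */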
498 static void
499 lpfc_rcv_prli(struct lpfc_hba * phba,
500 struct lpfc_nodelist * ndlp,
501 struct lpfc_iocbq *cmdiocb)
503 struct lpfc_dmabuf *pcmd;
504 uint32_t *lp;
505 PRLI *npr;
506 struct fc_rport *rport = ndlp->rport;
507 u32 roles;
509 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
510 lp = (uint32_t *) pcmd->virt;
511 npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
513 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
514 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
515 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
516 (npr->prliType == PRLI_FCP_TYPE)) {
517 if (npr->initiatorFunc)
518 ndlp->nlp_type |= NLP_FCP_INITIATOR;
519 if (npr->targetFunc)
520 ndlp->nlp_type |= NLP_FCP_TARGET;
521 if (npr->Retry)
522 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
524 if (rport) {
525 /* We need to update the rport role values */
526 roles = FC_RPORT_ROLE_UNKNOWN;
527 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
528 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
529 if (ndlp->nlp_type & NLP_FCP_TARGET)
530 roles |= FC_RPORT_ROLE_FCP_TARGET;
531 fc_remote_port_rolechg(rport, roles);
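/*
 * Decide whether this node should be authenticated with ADISC on the next
 * discovery pass: ADISC is used if the use-adisc parameter is set, if we
 * are in RSCN mode, or if the target is an FCP-2 device.  Sets
 * NLP_NPR_ADISC and returns 1 when ADISC will be used, otherwise 0.
 */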
535 static uint32_t
536 lpfc_disc_set_adisc(struct lpfc_hba * phba,
537 struct lpfc_nodelist * ndlp)
539 /* Check config parameter use-adisc or FCP-2 */
540 if ((phba->cfg_use_adisc == 0) &&
541 !(phba->fc_flag & FC_RSCN_MODE)) {
542 if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
543 return (0);
545 spin_lock_irq(phba->host->host_lock);
546 ndlp->nlp_flag |= NLP_NPR_ADISC;
547 spin_unlock_irq(phba->host->host_lock);
548 return (1);
551 static uint32_t
552 lpfc_disc_noop(struct lpfc_hba * phba,
553 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
555 /* This routine does nothing, just return the current state */
556 return (ndlp->nlp_state);
559 static uint32_t
560 lpfc_disc_illegal(struct lpfc_hba * phba,
561 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
563 lpfc_printf_log(phba,
564 KERN_ERR,
565 LOG_DISCOVERY,
566 "%d:0253 Illegal State Transition: node x%x event x%x, "
567 "state x%x Data: x%x x%x\n",
568 phba->brd_no,
569 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
570 ndlp->nlp_flag);
571 return (ndlp->nlp_state);
574 /* Start of Discovery State Machine routines */
576 static uint32_t
577 lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
578 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
580 struct lpfc_iocbq *cmdiocb;
582 cmdiocb = (struct lpfc_iocbq *) arg;
584 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
585 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
586 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
587 return (ndlp->nlp_state);
589 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
590 return (NLP_STE_FREED_NODE);
593 static uint32_t
594 lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
595 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
597 lpfc_issue_els_logo(phba, ndlp, 0);
598 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
599 return (ndlp->nlp_state);
602 static uint32_t
603 lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
604 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
606 struct lpfc_iocbq *cmdiocb;
608 cmdiocb = (struct lpfc_iocbq *) arg;
610 spin_lock_irq(phba->host->host_lock);
611 ndlp->nlp_flag |= NLP_LOGO_ACC;
612 spin_unlock_irq(phba->host->host_lock);
613 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
614 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
616 return (ndlp->nlp_state);
619 static uint32_t
620 lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
621 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
623 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
624 return (NLP_STE_FREED_NODE);
627 static uint32_t
628 lpfc_device_rm_unused_node(struct lpfc_hba * phba,
629 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
631 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
632 return (NLP_STE_FREED_NODE);
635 static uint32_t
636 lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
637 void *arg, uint32_t evt)
639 struct lpfc_iocbq *cmdiocb = arg;
640 struct lpfc_dmabuf *pcmd;
641 struct serv_parm *sp;
642 uint32_t *lp;
643 struct ls_rjt stat;
644 int port_cmp;
646 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
647 lp = (uint32_t *) pcmd->virt;
648 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
650 memset(&stat, 0, sizeof (struct ls_rjt));
652 /* For a PLOGI, we only accept if our portname is less
653 * than the remote portname.
655 phba->fc_stat.elsLogiCol++;
656 port_cmp = memcmp(&phba->fc_portname, &sp->portName,
657 sizeof (struct lpfc_name));
659 if (port_cmp >= 0) {
660 /* Reject this request because the remote node will accept
661 ours */
662 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
663 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
664 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
666 else {
667 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
668 } /* if our portname was less */
670 return (ndlp->nlp_state);
673 static uint32_t
674 lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
675 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
677 struct lpfc_iocbq *cmdiocb;
679 cmdiocb = (struct lpfc_iocbq *) arg;
681 /* software abort outstanding PLOGI */
682 lpfc_els_abort(phba, ndlp, 1);
683 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
684 spin_lock_irq(phba->host->host_lock);
685 ndlp->nlp_flag |= NLP_DELAY_TMO;
686 spin_unlock_irq(phba->host->host_lock);
688 if (evt == NLP_EVT_RCV_LOGO) {
689 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
691 else {
692 lpfc_issue_els_logo(phba, ndlp, 0);
695 /* Put ndlp in npr list set plogi timer for 1 sec */
696 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
697 ndlp->nlp_state = NLP_STE_NPR_NODE;
698 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
700 return (ndlp->nlp_state);
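/*
 * PLOGI completion while in PLOGI_ISSUE state: validate the response
 * service parameters, then register the login via a REG_LOGIN mailbox
 * (NameServer and FDMI nodes get dedicated completion handlers) and move
 * to REG_LOGIN_ISSUE.  On any failure the node is freed.
 */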
703 static uint32_t
704 lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
705 struct lpfc_nodelist * ndlp, void *arg,
706 uint32_t evt)
708 struct lpfc_iocbq *cmdiocb, *rspiocb;
709 struct lpfc_dmabuf *pcmd, *prsp;
710 uint32_t *lp;
711 IOCB_t *irsp;
712 struct serv_parm *sp;
713 LPFC_MBOXQ_t *mbox;
715 cmdiocb = (struct lpfc_iocbq *) arg;
716 rspiocb = cmdiocb->context_un.rsp_iocb;
718 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
719 return (ndlp->nlp_state);
722 irsp = &rspiocb->iocb;
724 if (irsp->ulpStatus)
725 goto out;
727 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
729 prsp = list_get_first(&pcmd->list,
730 struct lpfc_dmabuf,
731 list);
732 lp = (uint32_t *) prsp->virt;
734 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
735 if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3))
736 goto out;
738 /* PLOGI chkparm OK */
739 lpfc_printf_log(phba,
740 KERN_INFO,
741 LOG_ELS,
742 "%d:0121 PLOGI chkparm OK "
743 "Data: x%x x%x x%x x%x\n",
744 phba->brd_no,
745 ndlp->nlp_DID, ndlp->nlp_state,
746 ndlp->nlp_flag, ndlp->nlp_rpi);
748 if ((phba->cfg_fcp_class == 2) &&
749 (sp->cls2.classValid)) {
750 ndlp->nlp_fcp_info |= CLASS2;
751 } else {
752 ndlp->nlp_fcp_info |= CLASS3;
754 ndlp->nlp_class_sup = 0;
755 if (sp->cls1.classValid)
756 ndlp->nlp_class_sup |= FC_COS_CLASS1;
757 if (sp->cls2.classValid)
758 ndlp->nlp_class_sup |= FC_COS_CLASS2;
759 if (sp->cls3.classValid)
760 ndlp->nlp_class_sup |= FC_COS_CLASS3;
761 if (sp->cls4.classValid)
762 ndlp->nlp_class_sup |= FC_COS_CLASS4;
763 ndlp->nlp_maxframe =
764 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
765 sp->cmn.bbRcvSizeLsb;
767 if (!(mbox = mempool_alloc(phba->mbox_mem_pool,
768 GFP_KERNEL)))
769 goto out;
771 lpfc_unreg_rpi(phba, ndlp);
772 if (lpfc_reg_login
773 (phba, irsp->un.elsreq64.remoteID,
774 (uint8_t *) sp, mbox, 0) == 0) {
775 /* set_slim mailbox command needs to
776 * execute first, queue this command to
777 * be processed later.
779 switch(ndlp->nlp_DID) {
780 case NameServer_DID:
781 mbox->mbox_cmpl =
782 lpfc_mbx_cmpl_ns_reg_login;
783 break;
784 case FDMI_DID:
785 mbox->mbox_cmpl =
786 lpfc_mbx_cmpl_fdmi_reg_login;
787 break;
788 default:
789 mbox->mbox_cmpl =
790 lpfc_mbx_cmpl_reg_login;
792 mbox->context2 = ndlp;
793 if (lpfc_sli_issue_mbox(phba, mbox,
794 (MBX_NOWAIT | MBX_STOP_IOCB))
795 != MBX_NOT_FINISHED) {
796 ndlp->nlp_state =
797 NLP_STE_REG_LOGIN_ISSUE;
798 lpfc_nlp_list(phba, ndlp,
799 NLP_REGLOGIN_LIST);
800 return (ndlp->nlp_state);
802 mempool_free(mbox, phba->mbox_mem_pool);
803 } else {
804 mempool_free(mbox, phba->mbox_mem_pool);
808 out:
809 /* Free this node since the driver cannot login or has the wrong
810 sparm */
811 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
812 return (NLP_STE_FREED_NODE);
815 static uint32_t
816 lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
817 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
819 /* software abort outstanding PLOGI */
820 lpfc_els_abort(phba, ndlp, 1);
822 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
823 return (NLP_STE_FREED_NODE);
826 static uint32_t
827 lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
828 struct lpfc_nodelist * ndlp, void *arg,
829 uint32_t evt)
831 /* software abort outstanding PLOGI */
832 lpfc_els_abort(phba, ndlp, 1);
834 ndlp->nlp_state = NLP_STE_NPR_NODE;
835 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
836 spin_lock_irq(phba->host->host_lock);
837 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
838 spin_unlock_irq(phba->host->host_lock);
840 return (ndlp->nlp_state);
843 static uint32_t
844 lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
845 struct lpfc_nodelist * ndlp, void *arg,
846 uint32_t evt)
848 struct lpfc_iocbq *cmdiocb;
850 /* software abort outstanding ADISC */
851 lpfc_els_abort(phba, ndlp, 1);
853 cmdiocb = (struct lpfc_iocbq *) arg;
855 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
856 return (ndlp->nlp_state);
858 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
859 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
860 lpfc_issue_els_plogi(phba, ndlp, 0);
862 return (ndlp->nlp_state);
865 static uint32_t
866 lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
867 struct lpfc_nodelist * ndlp, void *arg,
868 uint32_t evt)
870 struct lpfc_iocbq *cmdiocb;
872 cmdiocb = (struct lpfc_iocbq *) arg;
874 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
875 return (ndlp->nlp_state);
878 static uint32_t
879 lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
880 struct lpfc_nodelist * ndlp, void *arg,
881 uint32_t evt)
883 struct lpfc_iocbq *cmdiocb;
885 cmdiocb = (struct lpfc_iocbq *) arg;
887 /* software abort outstanding ADISC */
888 lpfc_els_abort(phba, ndlp, 0);
890 lpfc_rcv_logo(phba, ndlp, cmdiocb);
891 return (ndlp->nlp_state);
894 static uint32_t
895 lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
896 struct lpfc_nodelist * ndlp, void *arg,
897 uint32_t evt)
899 struct lpfc_iocbq *cmdiocb;
901 cmdiocb = (struct lpfc_iocbq *) arg;
903 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
904 return (ndlp->nlp_state);
907 static uint32_t
908 lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
909 struct lpfc_nodelist * ndlp, void *arg,
910 uint32_t evt)
912 struct lpfc_iocbq *cmdiocb;
914 cmdiocb = (struct lpfc_iocbq *) arg;
916 /* Treat like rcv logo */
917 lpfc_rcv_logo(phba, ndlp, cmdiocb);
918 return (ndlp->nlp_state);
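/*
 * ADISC completion: if the response failed or the WWNN / WWPN no longer
 * match, clear the cached names, unregister the RPI and schedule a PLOGI
 * retry from the NPR list; otherwise the node moves to MAPPED.
 */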
921 static uint32_t
922 lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
923 struct lpfc_nodelist * ndlp, void *arg,
924 uint32_t evt)
926 struct lpfc_iocbq *cmdiocb, *rspiocb;
927 IOCB_t *irsp;
928 ADISC *ap;
930 cmdiocb = (struct lpfc_iocbq *) arg;
931 rspiocb = cmdiocb->context_un.rsp_iocb;
933 ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
934 irsp = &rspiocb->iocb;
936 if ((irsp->ulpStatus) ||
937 (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
938 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
939 /* 1 sec timeout */
940 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
941 spin_lock_irq(phba->host->host_lock);
942 ndlp->nlp_flag |= NLP_DELAY_TMO;
943 spin_unlock_irq(phba->host->host_lock);
945 memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
946 memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
948 ndlp->nlp_state = NLP_STE_NPR_NODE;
949 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
950 lpfc_unreg_rpi(phba, ndlp);
951 return (ndlp->nlp_state);
953 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
954 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
955 return (ndlp->nlp_state);
958 static uint32_t
959 lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
960 struct lpfc_nodelist * ndlp, void *arg,
961 uint32_t evt)
963 /* software abort outstanding ADISC */
964 lpfc_els_abort(phba, ndlp, 1);
966 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
967 return (NLP_STE_FREED_NODE);
970 static uint32_t
971 lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
972 struct lpfc_nodelist * ndlp, void *arg,
973 uint32_t evt)
975 /* software abort outstanding ADISC */
976 lpfc_els_abort(phba, ndlp, 1);
978 ndlp->nlp_state = NLP_STE_NPR_NODE;
979 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
980 spin_lock_irq(phba->host->host_lock);
981 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
982 spin_unlock_irq(phba->host->host_lock);
984 lpfc_disc_set_adisc(phba, ndlp);
985 return (ndlp->nlp_state);
988 static uint32_t
989 lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
990 struct lpfc_nodelist * ndlp, void *arg,
991 uint32_t evt)
993 struct lpfc_iocbq *cmdiocb;
995 cmdiocb = (struct lpfc_iocbq *) arg;
997 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
998 return (ndlp->nlp_state);
1001 static uint32_t
1002 lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
1003 struct lpfc_nodelist * ndlp, void *arg,
1004 uint32_t evt)
1006 struct lpfc_iocbq *cmdiocb;
1008 cmdiocb = (struct lpfc_iocbq *) arg;
1010 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1011 return (ndlp->nlp_state);
1014 static uint32_t
1015 lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1016 struct lpfc_nodelist * ndlp, void *arg,
1017 uint32_t evt)
1019 struct lpfc_iocbq *cmdiocb;
1021 cmdiocb = (struct lpfc_iocbq *) arg;
1023 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1024 return (ndlp->nlp_state);
1027 static uint32_t
1028 lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
1029 struct lpfc_nodelist * ndlp, void *arg,
1030 uint32_t evt)
1032 struct lpfc_iocbq *cmdiocb;
1034 cmdiocb = (struct lpfc_iocbq *) arg;
1036 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1037 return (ndlp->nlp_state);
1040 static uint32_t
1041 lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
1042 struct lpfc_nodelist * ndlp, void *arg,
1043 uint32_t evt)
1045 struct lpfc_iocbq *cmdiocb;
1047 cmdiocb = (struct lpfc_iocbq *) arg;
1048 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1049 return (ndlp->nlp_state);
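/*
 * REG_LOGIN mailbox completion: on failure, LOGO the node and schedule a
 * PLOGI retry from the NPR list; on success record the new RPI and either
 * issue a PRLI (non-fabric nodes) or move straight to UNMAPPED.
 */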
1052 static uint32_t
1053 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1054 struct lpfc_nodelist * ndlp,
1055 void *arg, uint32_t evt)
1057 LPFC_MBOXQ_t *pmb;
1058 MAILBOX_t *mb;
1059 uint32_t did;
1061 pmb = (LPFC_MBOXQ_t *) arg;
1062 mb = &pmb->mb;
1063 did = mb->un.varWords[1];
1064 if (mb->mbxStatus) {
1065 /* RegLogin failed */
1066 lpfc_printf_log(phba,
1067 KERN_ERR,
1068 LOG_DISCOVERY,
1069 "%d:0246 RegLogin failed Data: x%x x%x x%x\n",
1070 phba->brd_no,
1071 did, mb->mbxStatus, phba->hba_state);
1073 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1074 spin_lock_irq(phba->host->host_lock);
1075 ndlp->nlp_flag |= NLP_DELAY_TMO;
1076 spin_unlock_irq(phba->host->host_lock);
1078 lpfc_issue_els_logo(phba, ndlp, 0);
1079 /* Put ndlp in npr list set plogi timer for 1 sec */
1080 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
1081 ndlp->nlp_state = NLP_STE_NPR_NODE;
1082 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1083 return (ndlp->nlp_state);
1086 if (ndlp->nlp_rpi != 0)
1087 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1089 ndlp->nlp_rpi = mb->un.varWords[0];
1090 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1092 /* Only if we are not a fabric nport do we issue PRLI */
1093 if (!(ndlp->nlp_type & NLP_FABRIC)) {
1094 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1095 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1096 lpfc_issue_els_prli(phba, ndlp, 0);
1097 } else {
1098 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1099 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1101 return (ndlp->nlp_state);
1104 static uint32_t
1105 lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
1106 struct lpfc_nodelist * ndlp, void *arg,
1107 uint32_t evt)
1109 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1110 return (NLP_STE_FREED_NODE);
1113 static uint32_t
1114 lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
1115 struct lpfc_nodelist * ndlp, void *arg,
1116 uint32_t evt)
1118 ndlp->nlp_state = NLP_STE_NPR_NODE;
1119 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1120 spin_lock_irq(phba->host->host_lock);
1121 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1122 spin_unlock_irq(phba->host->host_lock);
1123 return (ndlp->nlp_state);
1126 static uint32_t
1127 lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
1128 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1130 struct lpfc_iocbq *cmdiocb;
1132 cmdiocb = (struct lpfc_iocbq *) arg;
1134 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1135 return (ndlp->nlp_state);
1138 static uint32_t
1139 lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
1140 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1142 struct lpfc_iocbq *cmdiocb;
1144 cmdiocb = (struct lpfc_iocbq *) arg;
1146 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1147 return (ndlp->nlp_state);
1150 static uint32_t
1151 lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
1152 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1154 struct lpfc_iocbq *cmdiocb;
1156 cmdiocb = (struct lpfc_iocbq *) arg;
1158 /* Software abort outstanding PRLI before sending acc */
1159 lpfc_els_abort(phba, ndlp, 1);
1161 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1162 return (ndlp->nlp_state);
1165 static uint32_t
1166 lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
1167 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1169 struct lpfc_iocbq *cmdiocb;
1171 cmdiocb = (struct lpfc_iocbq *) arg;
1173 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1174 return (ndlp->nlp_state);
1177 /* This routine is invoked when we rcv a PRLO request from an nport
1178 * we are logged into. We should send back a PRLO rsp setting the
1179 * appropriate bits.
1180 * NEXT STATE = PRLI_ISSUE
1182 static uint32_t
1183 lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
1184 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1186 struct lpfc_iocbq *cmdiocb;
1188 cmdiocb = (struct lpfc_iocbq *) arg;
1189 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1190 return (ndlp->nlp_state);
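/*
 * PRLI completion: on error the node simply moves to UNMAPPED; on success
 * the FCP initiator / target / FCP-2 bits are updated from the accept
 * payload and the node moves to MAPPED.
 */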
1193 static uint32_t
1194 lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1195 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1197 struct lpfc_iocbq *cmdiocb, *rspiocb;
1198 IOCB_t *irsp;
1199 PRLI *npr;
1201 cmdiocb = (struct lpfc_iocbq *) arg;
1202 rspiocb = cmdiocb->context_un.rsp_iocb;
1203 npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1205 irsp = &rspiocb->iocb;
1206 if (irsp->ulpStatus) {
1207 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1208 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1209 return (ndlp->nlp_state);
1212 /* Check out PRLI rsp */
1213 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1214 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1215 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1216 (npr->prliType == PRLI_FCP_TYPE)) {
1217 if (npr->initiatorFunc)
1218 ndlp->nlp_type |= NLP_FCP_INITIATOR;
1219 if (npr->targetFunc)
1220 ndlp->nlp_type |= NLP_FCP_TARGET;
1221 if (npr->Retry)
1222 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1225 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
1226 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
1227 return (ndlp->nlp_state);
1230 /*! lpfc_device_rm_prli_issue
1232 * \pre
1233 * \post
1234 * \param phba
1235 * \param ndlp
1236 * \param arg
1237 * \param evt
1238 * \return uint32_t
1240 * \b Description:
1241  * This routine is invoked when we receive a request to remove an nport we are in the
1242 * process of PRLIing. We should software abort outstanding prli, unreg
1243 * login, send a logout. We will change node state to UNUSED_NODE, put it
1244 * on plogi list so it can be freed when LOGO completes.
1247 static uint32_t
1248 lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
1249 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1251 /* software abort outstanding PRLI */
1252 lpfc_els_abort(phba, ndlp, 1);
1254 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1255 return (NLP_STE_FREED_NODE);
1259 /*! lpfc_device_recov_prli_issue
1261 * \pre
1262 * \post
1263 * \param phba
1264 * \param ndlp
1265 * \param arg
1266 * \param evt
1267 * \return uint32_t
1269 * \b Description:
1270  * The routine is invoked when the state of a device is unknown, like
1271 * during a link down. We should remove the nodelist entry from the
1272 * unmapped list, issue a UNREG_LOGIN, do a software abort of the
1273 * outstanding PRLI command, then free the node entry.
1275 static uint32_t
1276 lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
1277 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1279 /* software abort outstanding PRLI */
1280 lpfc_els_abort(phba, ndlp, 1);
1282 ndlp->nlp_state = NLP_STE_NPR_NODE;
1283 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1284 spin_lock_irq(phba->host->host_lock);
1285 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1286 spin_unlock_irq(phba->host->host_lock);
1287 return (ndlp->nlp_state);
1290 static uint32_t
1291 lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
1292 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1294 struct lpfc_iocbq *cmdiocb;
1296 cmdiocb = (struct lpfc_iocbq *) arg;
1298 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1299 return (ndlp->nlp_state);
1302 static uint32_t
1303 lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
1304 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1306 struct lpfc_iocbq *cmdiocb;
1308 cmdiocb = (struct lpfc_iocbq *) arg;
1310 lpfc_rcv_prli(phba, ndlp, cmdiocb);
1311 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1312 return (ndlp->nlp_state);
1315 static uint32_t
1316 lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
1317 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1319 struct lpfc_iocbq *cmdiocb;
1321 cmdiocb = (struct lpfc_iocbq *) arg;
1323 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1324 return (ndlp->nlp_state);
1327 static uint32_t
1328 lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
1329 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1331 struct lpfc_iocbq *cmdiocb;
1333 cmdiocb = (struct lpfc_iocbq *) arg;
1335 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1336 return (ndlp->nlp_state);
1339 static uint32_t
1340 lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
1341 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1343 struct lpfc_iocbq *cmdiocb;
1345 cmdiocb = (struct lpfc_iocbq *) arg;
1347 /* Treat like rcv logo */
1348 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1349 return (ndlp->nlp_state);
1352 static uint32_t
1353 lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
1354 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1356 ndlp->nlp_state = NLP_STE_NPR_NODE;
1357 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1358 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1359 lpfc_disc_set_adisc(phba, ndlp);
1361 return (ndlp->nlp_state);
1364 static uint32_t
1365 lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
1366 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1368 struct lpfc_iocbq *cmdiocb;
1370 cmdiocb = (struct lpfc_iocbq *) arg;
1372 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1373 return (ndlp->nlp_state);
1376 static uint32_t
1377 lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
1378 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1380 struct lpfc_iocbq *cmdiocb;
1382 cmdiocb = (struct lpfc_iocbq *) arg;
1384 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1385 return (ndlp->nlp_state);
1388 static uint32_t
1389 lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
1390 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1392 struct lpfc_iocbq *cmdiocb;
1394 cmdiocb = (struct lpfc_iocbq *) arg;
1396 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1397 return (ndlp->nlp_state);
1400 static uint32_t
1401 lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
1402 struct lpfc_nodelist * ndlp, void *arg,
1403 uint32_t evt)
1405 struct lpfc_iocbq *cmdiocb;
1407 cmdiocb = (struct lpfc_iocbq *) arg;
1409 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1410 return (ndlp->nlp_state);
1413 static uint32_t
1414 lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
1415 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1417 struct lpfc_iocbq *cmdiocb;
1419 cmdiocb = (struct lpfc_iocbq *) arg;
1421 /* flush the target */
1422 spin_lock_irq(phba->host->host_lock);
1423 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1424 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
1425 spin_unlock_irq(phba->host->host_lock);
1427 /* Treat like rcv logo */
1428 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1429 return (ndlp->nlp_state);
1432 static uint32_t
1433 lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
1434 struct lpfc_nodelist * ndlp, void *arg,
1435 uint32_t evt)
1437 ndlp->nlp_state = NLP_STE_NPR_NODE;
1438 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1439 spin_lock_irq(phba->host->host_lock);
1440 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1441 spin_unlock_irq(phba->host->host_lock);
1442 lpfc_disc_set_adisc(phba, ndlp);
1443 return (ndlp->nlp_state);
1446 static uint32_t
1447 lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
1448 struct lpfc_nodelist * ndlp, void *arg,
1449 uint32_t evt)
1451 struct lpfc_iocbq *cmdiocb;
1453 cmdiocb = (struct lpfc_iocbq *) arg;
1455 /* Ignore PLOGI if we have an outstanding LOGO */
1456 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1457 return (ndlp->nlp_state);
1460 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
1461 spin_lock_irq(phba->host->host_lock);
1462 ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
1463 spin_unlock_irq(phba->host->host_lock);
1464 return (ndlp->nlp_state);
1467 /* send PLOGI immediately, move to PLOGI issue state */
1468 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1469 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1470 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1471 lpfc_issue_els_plogi(phba, ndlp, 0);
1473 return (ndlp->nlp_state);
1476 static uint32_t
1477 lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
1478 struct lpfc_nodelist * ndlp, void *arg,
1479 uint32_t evt)
1481 struct lpfc_iocbq *cmdiocb;
1482 struct ls_rjt stat;
1484 cmdiocb = (struct lpfc_iocbq *) arg;
1486 memset(&stat, 0, sizeof (struct ls_rjt));
1487 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1488 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1489 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
1491 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1492 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1493 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1494 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1495 lpfc_issue_els_adisc(phba, ndlp, 0);
1496 } else {
1497 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1498 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1499 lpfc_issue_els_plogi(phba, ndlp, 0);
1502 return (ndlp->nlp_state);
1505 static uint32_t
1506 lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
1507 struct lpfc_nodelist * ndlp, void *arg,
1508 uint32_t evt)
1510 struct lpfc_iocbq *cmdiocb;
1512 cmdiocb = (struct lpfc_iocbq *) arg;
1514 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1515 return (ndlp->nlp_state);
1518 static uint32_t
1519 lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1520 struct lpfc_nodelist * ndlp, void *arg,
1521 uint32_t evt)
1523 struct lpfc_iocbq *cmdiocb;
1525 cmdiocb = (struct lpfc_iocbq *) arg;
1527 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1529 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1530 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1531 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1532 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1533 lpfc_issue_els_adisc(phba, ndlp, 0);
1534 } else {
1535 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1536 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1537 lpfc_issue_els_plogi(phba, ndlp, 0);
1540 return (ndlp->nlp_state);
1543 static uint32_t
1544 lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
1545 struct lpfc_nodelist * ndlp, void *arg,
1546 uint32_t evt)
1548 struct lpfc_iocbq *cmdiocb;
1550 cmdiocb = (struct lpfc_iocbq *) arg;
1552 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1554 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1555 if (ndlp->nlp_last_elscmd == (unsigned long)ELS_CMD_PLOGI) {
1556 return (ndlp->nlp_state);
1557 } else {
1558 spin_lock_irq(phba->host->host_lock);
1559 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1560 spin_unlock_irq(phba->host->host_lock);
1561 del_timer_sync(&ndlp->nlp_delayfunc);
1562 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1563 list_del_init(&ndlp->els_retry_evt.evt_listp);
1567 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1568 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1569 lpfc_issue_els_plogi(phba, ndlp, 0);
1570 return (ndlp->nlp_state);
1573 static uint32_t
1574 lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
1575 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1577 lpfc_unreg_rpi(phba, ndlp);
1578 /* This routine does nothing, just return the current state */
1579 return (ndlp->nlp_state);
1582 static uint32_t
1583 lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1584 struct lpfc_nodelist * ndlp, void *arg,
1585 uint32_t evt)
1587 LPFC_MBOXQ_t *pmb;
1588 MAILBOX_t *mb;
1590 pmb = (LPFC_MBOXQ_t *) arg;
1591 mb = &pmb->mb;
1593 /* save rpi */
1594 if (ndlp->nlp_rpi != 0)
1595 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1597 ndlp->nlp_rpi = mb->un.varWords[0];
1598 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1600 return (ndlp->nlp_state);
1603 static uint32_t
1604 lpfc_device_rm_npr_node(struct lpfc_hba * phba,
1605 struct lpfc_nodelist * ndlp, void *arg,
1606 uint32_t evt)
1608 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1609 return (NLP_STE_FREED_NODE);
1612 static uint32_t
1613 lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1614 struct lpfc_nodelist * ndlp, void *arg,
1615 uint32_t evt)
1617 spin_lock_irq(phba->host->host_lock);
1618 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1619 spin_unlock_irq(phba->host->host_lock);
1620 return (ndlp->nlp_state);
1624 /* This next section defines the NPort Discovery State Machine */
1626 /* There are 4 different doubly linked lists that nodelist entries can reside on.
1627 * The plogi list and adisc list are used when Link Up discovery or RSCN
1628 * processing is needed. Each list holds the nodes that we will send PLOGI
1629  * or ADISC on. These lists will keep track of what nodes will be affected
1630  * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
1631 * The unmapped_list will contain all nodes that we have successfully logged
1632 * into at the Fibre Channel level. The mapped_list will contain all nodes
1633 * that are mapped FCP targets.
1636 * The bind list is a list of undiscovered (potentially non-existent) nodes
1637 * that we have saved binding information on. This information is used when
1638 * nodes transition from the unmapped to the mapped list.
1640 /* For UNUSED_NODE state, the node has just been allocated.
1641 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
1642 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
1643 * and put on the unmapped list. For ADISC processing, the node is taken off
1644 * the ADISC list and placed on either the mapped or unmapped list (depending
1645 * on its previous state). Once on the unmapped list, a PRLI is issued and the
1646 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
1647 * changed to UNMAPPED_NODE. If the completion indicates a mapped
1648 * node, the node is taken off the unmapped list. The binding list is checked
1649 * for a valid binding, or a binding is automatically assigned. If binding
1650 * assignment is unsuccessful, the node is left on the unmapped list. If
1651 * binding assignment is successful, the associated binding list entry (if
1652 * any) is removed, and the node is placed on the mapped list.
1655 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
1656 * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
1657  * expire, all affected nodes will receive a DEVICE_RM event.
1660 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
1661 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
1662 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
1663 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
1664 * we will first process the ADISC list. 32 entries are processed initially and
1665  * ADISC is initiated for each one. Completions / Events for each node are
1666  * funnelled through the state machine. As each node finishes ADISC processing, it
1667 * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
1668 * waiting, and the ADISC list count is identically 0, then we are done. For
1669 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
1670 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
1671  * list. 32 entries are processed initially and PLOGI is initiated for each one.
1672  * Completions / Events for each node are funnelled through the state machine. As
1673 * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
1674 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
1675  * identically 0, then we are done. We have now completed discovery / RSCN
1676 * handling. Upon completion, ALL nodes should be on either the mapped or
1677 * unmapped lists.
1680 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1681 (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = {
1682 /* Action routine Event Current State */
1683 lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
1684 lpfc_rcv_els_unused_node, /* RCV_PRLI */
1685 lpfc_rcv_logo_unused_node, /* RCV_LOGO */
1686 lpfc_rcv_els_unused_node, /* RCV_ADISC */
1687 lpfc_rcv_els_unused_node, /* RCV_PDISC */
1688 lpfc_rcv_els_unused_node, /* RCV_PRLO */
1689 lpfc_disc_illegal, /* CMPL_PLOGI */
1690 lpfc_disc_illegal, /* CMPL_PRLI */
1691 lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
1692 lpfc_disc_illegal, /* CMPL_ADISC */
1693 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1694 lpfc_device_rm_unused_node, /* DEVICE_RM */
1695 lpfc_disc_illegal, /* DEVICE_RECOVERY */
1697 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
1698 lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
1699 lpfc_rcv_els_plogi_issue, /* RCV_LOGO */
1700 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
1701 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
1702 lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
1703 lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
1704 lpfc_disc_illegal, /* CMPL_PRLI */
1705 lpfc_disc_illegal, /* CMPL_LOGO */
1706 lpfc_disc_illegal, /* CMPL_ADISC */
1707 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1708 lpfc_device_rm_plogi_issue, /* DEVICE_RM */
1709 lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
1711 lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
1712 lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
1713 lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
1714 lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
1715 lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
1716 lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
1717 lpfc_disc_illegal, /* CMPL_PLOGI */
1718 lpfc_disc_illegal, /* CMPL_PRLI */
1719 lpfc_disc_illegal, /* CMPL_LOGO */
1720 lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
1721 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1722 lpfc_device_rm_adisc_issue, /* DEVICE_RM */
1723 lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
1725 lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
1726         lpfc_rcv_prli_reglogin_issue,   /* RCV_PRLI            */
1727 lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
1728 lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
1729 lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
1730 lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
1731 lpfc_disc_illegal, /* CMPL_PLOGI */
1732 lpfc_disc_illegal, /* CMPL_PRLI */
1733 lpfc_disc_illegal, /* CMPL_LOGO */
1734 lpfc_disc_illegal, /* CMPL_ADISC */
1735 lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
1736 lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
1737 lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
1739 lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
1740 lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
1741 lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
1742 lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
1743 lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
1744 lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
1745 lpfc_disc_illegal, /* CMPL_PLOGI */
1746 lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
1747 lpfc_disc_illegal, /* CMPL_LOGO */
1748 lpfc_disc_illegal, /* CMPL_ADISC */
1749 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1750 lpfc_device_rm_prli_issue, /* DEVICE_RM */
1751 lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
1753 lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
1754 lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
1755 lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
1756 lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
1757 lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
1758 lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
1759 lpfc_disc_illegal, /* CMPL_PLOGI */
1760 lpfc_disc_illegal, /* CMPL_PRLI */
1761 lpfc_disc_illegal, /* CMPL_LOGO */
1762 lpfc_disc_illegal, /* CMPL_ADISC */
1763 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1764 lpfc_disc_illegal, /* DEVICE_RM */
1765 lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
1767 lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
1768 lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
1769 lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
1770 lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
1771 lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
1772 lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
1773 lpfc_disc_illegal, /* CMPL_PLOGI */
1774 lpfc_disc_illegal, /* CMPL_PRLI */
1775 lpfc_disc_illegal, /* CMPL_LOGO */
1776 lpfc_disc_illegal, /* CMPL_ADISC */
1777 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1778 lpfc_disc_illegal, /* DEVICE_RM */
1779 lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
1781 lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
1782 lpfc_rcv_prli_npr_node, /* RCV_PRLI */
1783 lpfc_rcv_logo_npr_node, /* RCV_LOGO */
1784 lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
1785 lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
1786 lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
1787 lpfc_disc_noop, /* CMPL_PLOGI */
1788 lpfc_disc_noop, /* CMPL_PRLI */
1789 lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
1790 lpfc_disc_noop, /* CMPL_ADISC */
1791 lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
1792 lpfc_device_rm_npr_node, /* DEVICE_RM */
1793 lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
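/*
 * Main entry point for the discovery state machine: look up the action
 * routine for the current state / event pair, run it, and apply the
 * resulting state.  A node removal that was deferred while the routine ran
 * (NLP_DELAY_REMOVE) is performed here once the reference count drops to 0.
 */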
1797 lpfc_disc_state_machine(struct lpfc_hba * phba,
1798 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1800 uint32_t cur_state, rc;
1801 uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
1802 uint32_t);
1804 ndlp->nlp_disc_refcnt++;
1805 cur_state = ndlp->nlp_state;
1807 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
1808 lpfc_printf_log(phba,
1809 KERN_INFO,
1810 LOG_DISCOVERY,
1811 "%d:0211 DSM in event x%x on NPort x%x in state %d "
1812 "Data: x%x\n",
1813 phba->brd_no,
1814 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
1816 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
1817 rc = (func) (phba, ndlp, arg, evt);
1819 /* DSM out state <rc> on NPort <nlp_DID> */
1820 lpfc_printf_log(phba,
1821 KERN_INFO,
1822 LOG_DISCOVERY,
1823 "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
1824 phba->brd_no,
1825 rc, ndlp->nlp_DID, ndlp->nlp_flag);
1827 ndlp->nlp_disc_refcnt--;
1829 /* Check to see if ndlp removal is deferred */
1830 if ((ndlp->nlp_disc_refcnt == 0)
1831 && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
1832 spin_lock_irq(phba->host->host_lock);
1833 ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
1834 spin_unlock_irq(phba->host->host_lock);
1835 lpfc_nlp_remove(phba, ndlp);
1836 return (NLP_STE_FREED_NODE);
1838 if (rc == NLP_STE_FREED_NODE)
1839 return (NLP_STE_FREED_NODE);
1840 ndlp->nlp_state = rc;
1841 return (rc);