drivers/scsi/lpfc/lpfc_nportdisc.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2005 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
31 #include "lpfc_hw.h"
32 #include "lpfc_sli.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_scsi.h"
35 #include "lpfc.h"
36 #include "lpfc_logmsg.h"
37 #include "lpfc_crtn.h"
40 /* Called to verify a rcv'ed ADISC was intended for us. */
41 static int
42 lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
43 struct lpfc_name * nn, struct lpfc_name * pn)
45 /* Compare the ADISC rsp WWNN / WWPN against our internal node
46 * table entry for that node.
47 */
48 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
49 return (0);
51 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
52 return (0);
54 /* we match, return success */
55 return (1);
59 int
60 lpfc_check_sparm(struct lpfc_hba * phba,
61 struct lpfc_nodelist * ndlp, struct serv_parm * sp,
62 uint32_t class)
64 volatile struct serv_parm *hsp = &phba->fc_sparam;
65 /* First check for supported version */
67 /* Next check for class validity */
68 if (sp->cls1.classValid) {
70 if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb)
71 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
72 if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb)
73 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
74 } else if (class == CLASS1) {
75 return (0);
78 if (sp->cls2.classValid) {
80 if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb)
81 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
82 if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb)
83 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
84 } else if (class == CLASS2) {
85 return (0);
88 if (sp->cls3.classValid) {
90 if (sp->cls3.rcvDataSizeMsb > hsp->cls3.rcvDataSizeMsb)
91 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
92 if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb)
93 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
94 } else if (class == CLASS3) {
95 return (0);
98 if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb)
99 sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb;
100 if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb)
101 sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
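/* Each receive data field size byte above is clamped to what this HBA
 * reported in its own service parameters (hsp), so the values carried
 * forward never exceed what either end of the login can handle.
 */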
103 /* If check is good, copy wwpn wwnn into ndlp */
104 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
105 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
106 return (1);
109 static void *
110 lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
111 struct lpfc_iocbq *cmdiocb,
112 struct lpfc_iocbq *rspiocb)
114 struct lpfc_dmabuf *pcmd, *prsp;
115 uint32_t *lp;
116 void *ptr = NULL;
117 IOCB_t *irsp;
119 irsp = &rspiocb->iocb;
120 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
122 /* For lpfc_els_abort, context2 could be zero'ed to delay
123 * freeing associated memory till after ABTS completes.
125 if (pcmd) {
126 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
127 list);
128 if (prsp) {
129 lp = (uint32_t *) prsp->virt;
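/* The first 32-bit word of the ELS response payload is the
 * command/response code (ACC or LS_RJT); step past it so the
 * returned pointer addresses the response parameters themselves.
 */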
130 ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
133 else {
134 /* Force ulpStatus error since we are returning NULL ptr */
135 if (!(irsp->ulpStatus)) {
136 irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
137 irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
139 ptr = NULL;
141 return (ptr);
145 /*
146 * Free resources / clean up outstanding I/Os
147 * associated with a LPFC_NODELIST entry. This
148 * routine effectively results in a "software abort".
149 */
150 static int
151 lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
152 int send_abts)
154 struct lpfc_sli *psli;
155 struct lpfc_sli_ring *pring;
156 struct lpfc_iocbq *iocb, *next_iocb;
157 IOCB_t *icmd;
158 int found = 0;
160 /* Abort outstanding I/O on NPort <nlp_DID> */
161 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
162 "%d:0201 Abort outstanding I/O on NPort x%x "
163 "Data: x%x x%x x%x\n",
164 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
165 ndlp->nlp_state, ndlp->nlp_rpi);
167 psli = &phba->sli;
168 pring = &psli->ring[LPFC_ELS_RING];
170 /* First check the txq */
171 do {
172 found = 0;
173 spin_lock_irq(phba->host->host_lock);
174 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
175 /* Check to see if iocb matches the nport we are looking
176 for */
177 if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
178 found = 1;
179 /* It matches, so dequeue and call compl with an
180 error */
181 list_del(&iocb->list);
182 pring->txq_cnt--;
183 if (iocb->iocb_cmpl) {
184 icmd = &iocb->iocb;
185 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
186 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
187 spin_unlock_irq(phba->host->host_lock);
188 (iocb->iocb_cmpl) (phba, iocb, iocb);
189 spin_lock_irq(phba->host->host_lock);
190 } else {
191 list_add_tail(&iocb->list,
192 &phba->lpfc_iocb_list);
194 break;
197 spin_unlock_irq(phba->host->host_lock);
198 } while (found);
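/* Note: the scan restarts from the head of the txq after every match
 * because host_lock is dropped around the completion callback, which
 * may modify the queue while it runs.
 */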
200 /* Everything on txcmplq will be returned by firmware
201 * with a no rpi / linkdown / abort error. For ring 0,
202 * ELS discovery, we want to get rid of it right here.
204 /* Next check the txcmplq */
205 do {
206 found = 0;
207 spin_lock_irq(phba->host->host_lock);
208 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
209 list) {
210 /* Check to see if iocb matches the nport we are looking
211 for */
212 if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) {
213 found = 1;
214 /* It matches, so dequeue and call compl with an
215 error */
216 list_del(&iocb->list);
217 pring->txcmplq_cnt--;
219 icmd = &iocb->iocb;
220 /* If the driver is completing an ELS
221 * command early, flush it out of the firmware.
223 if (send_abts &&
224 (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
225 (icmd->un.elsreq64.bdl.ulpIoTag32)) {
226 lpfc_sli_issue_abort_iotag32(phba,
227 pring, iocb);
229 if (iocb->iocb_cmpl) {
230 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
231 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
232 spin_unlock_irq(phba->host->host_lock);
233 (iocb->iocb_cmpl) (phba, iocb, iocb);
234 spin_lock_irq(phba->host->host_lock);
235 } else {
236 list_add_tail(&iocb->list,
237 &phba->lpfc_iocb_list);
239 break;
242 spin_unlock_irq(phba->host->host_lock);
243 } while(found);
245 /* If we are delaying issuing an ELS command, cancel it */
246 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
247 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
248 del_timer_sync(&ndlp->nlp_delayfunc);
249 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
250 list_del_init(&ndlp->els_retry_evt.evt_listp);
252 return (0);
255 static int
256 lpfc_rcv_plogi(struct lpfc_hba * phba,
257 struct lpfc_nodelist * ndlp,
258 struct lpfc_iocbq *cmdiocb)
260 struct lpfc_dmabuf *pcmd;
261 uint32_t *lp;
262 IOCB_t *icmd;
263 struct serv_parm *sp;
264 LPFC_MBOXQ_t *mbox;
265 struct ls_rjt stat;
266 int rc;
268 memset(&stat, 0, sizeof (struct ls_rjt));
269 if (phba->hba_state <= LPFC_FLOGI) {
270 /* Before responding to PLOGI, check for pt2pt mode.
271 * If we are pt2pt, with an outstanding FLOGI, abort
272 * the FLOGI and resend it first.
274 if (phba->fc_flag & FC_PT2PT) {
275 lpfc_els_abort_flogi(phba);
276 if (!(phba->fc_flag & FC_PT2PT_PLOGI)) {
277 /* If the other side is supposed to initiate
278 * the PLOGI anyway, just ACC it now and
279 * move on with discovery.
281 phba->fc_edtov = FF_DEF_EDTOV;
282 phba->fc_ratov = FF_DEF_RATOV;
283 /* Start discovery - this should just do
284 CLEAR_LA */
285 lpfc_disc_start(phba);
287 else {
288 lpfc_initial_flogi(phba);
291 else {
292 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
293 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
294 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb,
295 ndlp);
296 return 0;
299 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
300 lp = (uint32_t *) pcmd->virt;
301 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
302 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
303 /* Reject this request because of invalid parameters */
304 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
305 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
306 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
307 return (0);
309 icmd = &cmdiocb->iocb;
311 /* PLOGI chkparm OK */
312 lpfc_printf_log(phba,
313 KERN_INFO,
314 LOG_ELS,
315 "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
316 phba->brd_no,
317 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
318 ndlp->nlp_rpi);
320 if ((phba->cfg_fcp_class == 2) &&
321 (sp->cls2.classValid)) {
322 ndlp->nlp_fcp_info |= CLASS2;
323 } else {
324 ndlp->nlp_fcp_info |= CLASS3;
326 ndlp->nlp_class_sup = 0;
327 if (sp->cls1.classValid)
328 ndlp->nlp_class_sup |= FC_COS_CLASS1;
329 if (sp->cls2.classValid)
330 ndlp->nlp_class_sup |= FC_COS_CLASS2;
331 if (sp->cls3.classValid)
332 ndlp->nlp_class_sup |= FC_COS_CLASS3;
333 if (sp->cls4.classValid)
334 ndlp->nlp_class_sup |= FC_COS_CLASS4;
335 ndlp->nlp_maxframe =
336 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
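/* The max receive frame size is a 12-bit value split across two bytes of
 * the common service parameters. For example, bbRcvSizeMsb = 0x08 and
 * bbRcvSizeLsb = 0x00 give ((0x08 & 0x0F) << 8) | 0x00 = 0x800 = 2048 bytes.
 */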
338 /* no need to reg_login if we are already in one of these states */
339 switch(ndlp->nlp_state) {
340 case NLP_STE_NPR_NODE:
341 if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
342 break;
343 case NLP_STE_REG_LOGIN_ISSUE:
344 case NLP_STE_PRLI_ISSUE:
345 case NLP_STE_UNMAPPED_NODE:
346 case NLP_STE_MAPPED_NODE:
347 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
348 return (1);
351 if ((phba->fc_flag & FC_PT2PT)
352 && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
353 /* rcv'ed PLOGI decides what our NPortId will be */
354 phba->fc_myDID = icmd->un.rcvels.parmRo;
355 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
356 if (mbox == NULL)
357 goto out;
358 lpfc_config_link(phba, mbox);
359 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
360 rc = lpfc_sli_issue_mbox
361 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
362 if (rc == MBX_NOT_FINISHED) {
363 mempool_free( mbox, phba->mbox_mem_pool);
364 goto out;
367 lpfc_can_disctmo(phba);
369 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
370 if (mbox == NULL)
371 goto out;
373 if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
374 (uint8_t *) sp, mbox, 0)) {
375 mempool_free( mbox, phba->mbox_mem_pool);
376 goto out;
379 /* ACC PLOGI rsp command needs to execute first,
380 * queue this mbox command to be processed later.
382 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
383 mbox->context2 = ndlp;
384 ndlp->nlp_flag |= NLP_ACC_REGLOGIN;
386 /* If there is an outstanding PLOGI issued, abort it before
387 * sending ACC rsp to the PLOGI received.
388 */
389 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
390 /* software abort outstanding PLOGI */
391 lpfc_els_abort(phba, ndlp, 1);
393 ndlp->nlp_flag |= NLP_RCV_PLOGI;
394 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
395 return (1);
397 out:
398 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
399 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
400 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
401 return (0);
404 static int
405 lpfc_rcv_padisc(struct lpfc_hba * phba,
406 struct lpfc_nodelist * ndlp,
407 struct lpfc_iocbq *cmdiocb)
409 struct lpfc_dmabuf *pcmd;
410 struct serv_parm *sp;
411 struct lpfc_name *pnn, *ppn;
412 struct ls_rjt stat;
413 ADISC *ap;
414 IOCB_t *icmd;
415 uint32_t *lp;
416 uint32_t cmd;
418 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
419 lp = (uint32_t *) pcmd->virt;
421 cmd = *lp++;
422 if (cmd == ELS_CMD_ADISC) {
423 ap = (ADISC *) lp;
424 pnn = (struct lpfc_name *) & ap->nodeName;
425 ppn = (struct lpfc_name *) & ap->portName;
426 } else {
427 sp = (struct serv_parm *) lp;
428 pnn = (struct lpfc_name *) & sp->nodeName;
429 ppn = (struct lpfc_name *) & sp->portName;
432 icmd = &cmdiocb->iocb;
433 if ((icmd->ulpStatus == 0) &&
434 (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
435 if (cmd == ELS_CMD_ADISC) {
436 lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
438 else {
439 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
440 NULL, 0);
442 return (1);
444 /* Reject this request because of invalid parameters */
445 stat.un.b.lsRjtRsvd0 = 0;
446 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
447 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
448 stat.un.b.vendorUnique = 0;
449 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
451 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
452 /* 1 sec timeout */
453 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
455 spin_lock_irq(phba->host->host_lock);
456 ndlp->nlp_flag |= NLP_DELAY_TMO;
457 spin_unlock_irq(phba->host->host_lock);
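/* When nlp_delayfunc expires, the delayed-retry handling reissues the ELS
 * command recorded in nlp_last_elscmd (a PLOGI here), so the remote port
 * gets another login attempt after the one second delay.
 */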
458 ndlp->nlp_state = NLP_STE_NPR_NODE;
459 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
460 return (0);
463 static int
464 lpfc_rcv_logo(struct lpfc_hba * phba,
465 struct lpfc_nodelist * ndlp,
466 struct lpfc_iocbq *cmdiocb)
468 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
469 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
470 * PLOGIs during LOGO storms from a device.
472 ndlp->nlp_flag |= NLP_LOGO_ACC;
473 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
475 if (!(ndlp->nlp_type & NLP_FABRIC)) {
476 /* Only try to re-login if this is NOT a Fabric Node */
477 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
478 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
479 spin_lock_irq(phba->host->host_lock);
480 ndlp->nlp_flag |= NLP_DELAY_TMO;
481 spin_unlock_irq(phba->host->host_lock);
484 ndlp->nlp_state = NLP_STE_NPR_NODE;
485 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
487 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
488 /* The driver has to wait until the ACC completes before it continues
489 * processing the LOGO. The action will resume in
490 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
491 * unreg_login, the driver waits so the ACC does not get aborted.
493 return (0);
496 static void
497 lpfc_rcv_prli(struct lpfc_hba * phba,
498 struct lpfc_nodelist * ndlp,
499 struct lpfc_iocbq *cmdiocb)
501 struct lpfc_dmabuf *pcmd;
502 uint32_t *lp;
503 PRLI *npr;
504 struct fc_rport *rport = ndlp->rport;
505 u32 roles;
507 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
508 lp = (uint32_t *) pcmd->virt;
509 npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
511 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
512 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
513 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
514 (npr->prliType == PRLI_FCP_TYPE)) {
515 if (npr->initiatorFunc)
516 ndlp->nlp_type |= NLP_FCP_INITIATOR;
517 if (npr->targetFunc)
518 ndlp->nlp_type |= NLP_FCP_TARGET;
519 if (npr->Retry)
520 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
522 if (rport) {
523 /* We need to update the rport role values */
524 roles = FC_RPORT_ROLE_UNKNOWN;
525 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
526 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
527 if (ndlp->nlp_type & NLP_FCP_TARGET)
528 roles |= FC_RPORT_ROLE_FCP_TARGET;
529 fc_remote_port_rolechg(rport, roles);
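/* fc_remote_port_rolechg() reports the updated roles to the FC transport
 * class; if the FCP target role was just added, the transport will
 * normally schedule a scan of this rport so its LUNs are discovered.
 */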
533 static uint32_t
534 lpfc_disc_set_adisc(struct lpfc_hba * phba,
535 struct lpfc_nodelist * ndlp)
537 /* Check config parameter use-adisc or FCP-2 */
538 if ((phba->cfg_use_adisc == 0) &&
539 !(phba->fc_flag & FC_RSCN_MODE)) {
540 if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
541 return (0);
543 spin_lock_irq(phba->host->host_lock);
544 ndlp->nlp_flag |= NLP_NPR_ADISC;
545 spin_unlock_irq(phba->host->host_lock);
546 return (1);
549 static uint32_t
550 lpfc_disc_noop(struct lpfc_hba * phba,
551 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
553 /* This routine does nothing, just return the current state */
554 return (ndlp->nlp_state);
557 static uint32_t
558 lpfc_disc_illegal(struct lpfc_hba * phba,
559 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
561 lpfc_printf_log(phba,
562 KERN_ERR,
563 LOG_DISCOVERY,
564 "%d:0253 Illegal State Transition: node x%x event x%x, "
565 "state x%x Data: x%x x%x\n",
566 phba->brd_no,
567 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
568 ndlp->nlp_flag);
569 return (ndlp->nlp_state);
572 /* Start of Discovery State Machine routines */
574 static uint32_t
575 lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
576 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
578 struct lpfc_iocbq *cmdiocb;
580 cmdiocb = (struct lpfc_iocbq *) arg;
582 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
583 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
584 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
585 return (ndlp->nlp_state);
587 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
588 return (NLP_STE_FREED_NODE);
591 static uint32_t
592 lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
593 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
595 lpfc_issue_els_logo(phba, ndlp, 0);
596 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
597 return (ndlp->nlp_state);
600 static uint32_t
601 lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
602 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
604 struct lpfc_iocbq *cmdiocb;
606 cmdiocb = (struct lpfc_iocbq *) arg;
608 spin_lock_irq(phba->host->host_lock);
609 ndlp->nlp_flag |= NLP_LOGO_ACC;
610 spin_unlock_irq(phba->host->host_lock);
611 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
612 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
614 return (ndlp->nlp_state);
617 static uint32_t
618 lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
619 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
621 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
622 return (NLP_STE_FREED_NODE);
625 static uint32_t
626 lpfc_device_rm_unused_node(struct lpfc_hba * phba,
627 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
629 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
630 return (NLP_STE_FREED_NODE);
633 static uint32_t
634 lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
635 void *arg, uint32_t evt)
637 struct lpfc_iocbq *cmdiocb = arg;
638 struct lpfc_dmabuf *pcmd;
639 struct serv_parm *sp;
640 uint32_t *lp;
641 struct ls_rjt stat;
642 int port_cmp;
644 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
645 lp = (uint32_t *) pcmd->virt;
646 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
648 memset(&stat, 0, sizeof (struct ls_rjt));
650 /* For a PLOGI, we only accept if our portname is less
651 * than the remote portname.
653 phba->fc_stat.elsLogiCol++;
654 port_cmp = memcmp(&phba->fc_portname, &sp->portName,
655 sizeof (struct lpfc_name));
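/* PLOGI collision resolution: if our WWPN is greater than or equal to the
 * remote port's, reject the incoming PLOGI with "command in progress" and
 * let our own outstanding PLOGI complete; otherwise accept the remote's.
 */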
657 if (port_cmp >= 0) {
658 /* Reject this request because the remote node will accept
659 ours */
660 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
661 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
662 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
664 else {
665 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
666 } /* if our portname was less */
668 return (ndlp->nlp_state);
671 static uint32_t
672 lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
673 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
675 struct lpfc_iocbq *cmdiocb;
677 cmdiocb = (struct lpfc_iocbq *) arg;
679 /* software abort outstanding PLOGI */
680 lpfc_els_abort(phba, ndlp, 1);
681 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
682 spin_lock_irq(phba->host->host_lock);
683 ndlp->nlp_flag |= NLP_DELAY_TMO;
684 spin_unlock_irq(phba->host->host_lock);
686 if (evt == NLP_EVT_RCV_LOGO) {
687 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
689 else {
690 lpfc_issue_els_logo(phba, ndlp, 0);
693 /* Put ndlp in npr list set plogi timer for 1 sec */
694 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
695 ndlp->nlp_state = NLP_STE_NPR_NODE;
696 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
698 return (ndlp->nlp_state);
701 static uint32_t
702 lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
703 struct lpfc_nodelist * ndlp, void *arg,
704 uint32_t evt)
706 struct lpfc_iocbq *cmdiocb, *rspiocb;
707 struct lpfc_dmabuf *pcmd, *prsp;
708 uint32_t *lp;
709 IOCB_t *irsp;
710 struct serv_parm *sp;
711 LPFC_MBOXQ_t *mbox;
713 cmdiocb = (struct lpfc_iocbq *) arg;
714 rspiocb = cmdiocb->context_un.rsp_iocb;
716 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
717 return (ndlp->nlp_state);
720 irsp = &rspiocb->iocb;
722 if (irsp->ulpStatus)
723 goto out;
725 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
727 prsp = list_get_first(&pcmd->list,
728 struct lpfc_dmabuf,
729 list);
730 lp = (uint32_t *) prsp->virt;
732 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
733 if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3))
734 goto out;
736 /* PLOGI chkparm OK */
737 lpfc_printf_log(phba,
738 KERN_INFO,
739 LOG_ELS,
740 "%d:0121 PLOGI chkparm OK "
741 "Data: x%x x%x x%x x%x\n",
742 phba->brd_no,
743 ndlp->nlp_DID, ndlp->nlp_state,
744 ndlp->nlp_flag, ndlp->nlp_rpi);
746 if ((phba->cfg_fcp_class == 2) &&
747 (sp->cls2.classValid)) {
748 ndlp->nlp_fcp_info |= CLASS2;
749 } else {
750 ndlp->nlp_fcp_info |= CLASS3;
752 ndlp->nlp_class_sup = 0;
753 if (sp->cls1.classValid)
754 ndlp->nlp_class_sup |= FC_COS_CLASS1;
755 if (sp->cls2.classValid)
756 ndlp->nlp_class_sup |= FC_COS_CLASS2;
757 if (sp->cls3.classValid)
758 ndlp->nlp_class_sup |= FC_COS_CLASS3;
759 if (sp->cls4.classValid)
760 ndlp->nlp_class_sup |= FC_COS_CLASS4;
761 ndlp->nlp_maxframe =
762 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
763 sp->cmn.bbRcvSizeLsb;
765 if (!(mbox = mempool_alloc(phba->mbox_mem_pool,
766 GFP_KERNEL)))
767 goto out;
769 lpfc_unreg_rpi(phba, ndlp);
770 if (lpfc_reg_login
771 (phba, irsp->un.elsreq64.remoteID,
772 (uint8_t *) sp, mbox, 0) == 0) {
773 /* set_slim mailbox command needs to
774 * execute first, queue this command to
775 * be processed later.
777 switch(ndlp->nlp_DID) {
778 case NameServer_DID:
779 mbox->mbox_cmpl =
780 lpfc_mbx_cmpl_ns_reg_login;
781 break;
782 case FDMI_DID:
783 mbox->mbox_cmpl =
784 lpfc_mbx_cmpl_fdmi_reg_login;
785 break;
786 default:
787 mbox->mbox_cmpl =
788 lpfc_mbx_cmpl_reg_login;
790 mbox->context2 = ndlp;
791 if (lpfc_sli_issue_mbox(phba, mbox,
792 (MBX_NOWAIT | MBX_STOP_IOCB))
793 != MBX_NOT_FINISHED) {
794 ndlp->nlp_state =
795 NLP_STE_REG_LOGIN_ISSUE;
796 lpfc_nlp_list(phba, ndlp,
797 NLP_REGLOGIN_LIST);
798 return (ndlp->nlp_state);
800 mempool_free(mbox, phba->mbox_mem_pool);
801 } else {
802 mempool_free(mbox, phba->mbox_mem_pool);
806 out:
807 /* Free this node since the driver cannot login or has the wrong
808 sparm */
809 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
810 return (NLP_STE_FREED_NODE);
813 static uint32_t
814 lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
815 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
817 /* software abort outstanding PLOGI */
818 lpfc_els_abort(phba, ndlp, 1);
820 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
821 return (NLP_STE_FREED_NODE);
824 static uint32_t
825 lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
826 struct lpfc_nodelist * ndlp, void *arg,
827 uint32_t evt)
829 /* software abort outstanding PLOGI */
830 lpfc_els_abort(phba, ndlp, 1);
832 ndlp->nlp_state = NLP_STE_NPR_NODE;
833 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
834 spin_lock_irq(phba->host->host_lock);
835 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
836 spin_unlock_irq(phba->host->host_lock);
838 return (ndlp->nlp_state);
841 static uint32_t
842 lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
843 struct lpfc_nodelist * ndlp, void *arg,
844 uint32_t evt)
846 struct lpfc_iocbq *cmdiocb;
848 /* software abort outstanding ADISC */
849 lpfc_els_abort(phba, ndlp, 1);
851 cmdiocb = (struct lpfc_iocbq *) arg;
853 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
854 return (ndlp->nlp_state);
856 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
857 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
858 lpfc_issue_els_plogi(phba, ndlp, 0);
860 return (ndlp->nlp_state);
863 static uint32_t
864 lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
865 struct lpfc_nodelist * ndlp, void *arg,
866 uint32_t evt)
868 struct lpfc_iocbq *cmdiocb;
870 cmdiocb = (struct lpfc_iocbq *) arg;
872 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
873 return (ndlp->nlp_state);
876 static uint32_t
877 lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
878 struct lpfc_nodelist * ndlp, void *arg,
879 uint32_t evt)
881 struct lpfc_iocbq *cmdiocb;
883 cmdiocb = (struct lpfc_iocbq *) arg;
885 /* software abort outstanding ADISC */
886 lpfc_els_abort(phba, ndlp, 0);
888 lpfc_rcv_logo(phba, ndlp, cmdiocb);
889 return (ndlp->nlp_state);
892 static uint32_t
893 lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
894 struct lpfc_nodelist * ndlp, void *arg,
895 uint32_t evt)
897 struct lpfc_iocbq *cmdiocb;
899 cmdiocb = (struct lpfc_iocbq *) arg;
901 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
902 return (ndlp->nlp_state);
905 static uint32_t
906 lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
907 struct lpfc_nodelist * ndlp, void *arg,
908 uint32_t evt)
910 struct lpfc_iocbq *cmdiocb;
912 cmdiocb = (struct lpfc_iocbq *) arg;
914 /* Treat like rcv logo */
915 lpfc_rcv_logo(phba, ndlp, cmdiocb);
916 return (ndlp->nlp_state);
919 static uint32_t
920 lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
921 struct lpfc_nodelist * ndlp, void *arg,
922 uint32_t evt)
924 struct lpfc_iocbq *cmdiocb, *rspiocb;
925 IOCB_t *irsp;
926 ADISC *ap;
928 cmdiocb = (struct lpfc_iocbq *) arg;
929 rspiocb = cmdiocb->context_un.rsp_iocb;
931 ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
932 irsp = &rspiocb->iocb;
934 if ((irsp->ulpStatus) ||
935 (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
936 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
937 /* 1 sec timeout */
938 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
939 spin_lock_irq(phba->host->host_lock);
940 ndlp->nlp_flag |= NLP_DELAY_TMO;
941 spin_unlock_irq(phba->host->host_lock);
943 memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
944 memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
946 ndlp->nlp_state = NLP_STE_NPR_NODE;
947 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
948 lpfc_unreg_rpi(phba, ndlp);
949 return (ndlp->nlp_state);
951 if (ndlp->nlp_type & NLP_FCP_TARGET) {
952 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
953 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
954 } else {
955 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
956 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
958 return (ndlp->nlp_state);
961 static uint32_t
962 lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
963 struct lpfc_nodelist * ndlp, void *arg,
964 uint32_t evt)
966 /* software abort outstanding ADISC */
967 lpfc_els_abort(phba, ndlp, 1);
969 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
970 return (NLP_STE_FREED_NODE);
973 static uint32_t
974 lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
975 struct lpfc_nodelist * ndlp, void *arg,
976 uint32_t evt)
978 /* software abort outstanding ADISC */
979 lpfc_els_abort(phba, ndlp, 1);
981 ndlp->nlp_state = NLP_STE_NPR_NODE;
982 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
983 spin_lock_irq(phba->host->host_lock);
984 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
985 spin_unlock_irq(phba->host->host_lock);
987 lpfc_disc_set_adisc(phba, ndlp);
988 return (ndlp->nlp_state);
991 static uint32_t
992 lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
993 struct lpfc_nodelist * ndlp, void *arg,
994 uint32_t evt)
996 struct lpfc_iocbq *cmdiocb;
998 cmdiocb = (struct lpfc_iocbq *) arg;
1000 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1001 return (ndlp->nlp_state);
1004 static uint32_t
1005 lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
1006 struct lpfc_nodelist * ndlp, void *arg,
1007 uint32_t evt)
1009 struct lpfc_iocbq *cmdiocb;
1011 cmdiocb = (struct lpfc_iocbq *) arg;
1013 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1014 return (ndlp->nlp_state);
1017 static uint32_t
1018 lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1019 struct lpfc_nodelist * ndlp, void *arg,
1020 uint32_t evt)
1022 struct lpfc_iocbq *cmdiocb;
1024 cmdiocb = (struct lpfc_iocbq *) arg;
1026 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1027 return (ndlp->nlp_state);
1030 static uint32_t
1031 lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
1032 struct lpfc_nodelist * ndlp, void *arg,
1033 uint32_t evt)
1035 struct lpfc_iocbq *cmdiocb;
1037 cmdiocb = (struct lpfc_iocbq *) arg;
1039 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1040 return (ndlp->nlp_state);
1043 static uint32_t
1044 lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
1045 struct lpfc_nodelist * ndlp, void *arg,
1046 uint32_t evt)
1048 struct lpfc_iocbq *cmdiocb;
1050 cmdiocb = (struct lpfc_iocbq *) arg;
1051 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1052 return (ndlp->nlp_state);
1055 static uint32_t
1056 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1057 struct lpfc_nodelist * ndlp,
1058 void *arg, uint32_t evt)
1060 LPFC_MBOXQ_t *pmb;
1061 MAILBOX_t *mb;
1062 uint32_t did;
1064 pmb = (LPFC_MBOXQ_t *) arg;
1065 mb = &pmb->mb;
1066 did = mb->un.varWords[1];
1067 if (mb->mbxStatus) {
1068 /* RegLogin failed */
1069 lpfc_printf_log(phba,
1070 KERN_ERR,
1071 LOG_DISCOVERY,
1072 "%d:0246 RegLogin failed Data: x%x x%x x%x\n",
1073 phba->brd_no,
1074 did, mb->mbxStatus, phba->hba_state);
1076 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1077 spin_lock_irq(phba->host->host_lock);
1078 ndlp->nlp_flag |= NLP_DELAY_TMO;
1079 spin_unlock_irq(phba->host->host_lock);
1081 lpfc_issue_els_logo(phba, ndlp, 0);
1082 /* Put ndlp in npr list set plogi timer for 1 sec */
1083 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
1084 ndlp->nlp_state = NLP_STE_NPR_NODE;
1085 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1086 return (ndlp->nlp_state);
1089 if (ndlp->nlp_rpi != 0)
1090 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1092 ndlp->nlp_rpi = mb->un.varWords[0];
1093 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
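/* The RPI (remote port index) returned by REG_LOGIN in varWords[0] is the
 * firmware's handle for this remote port; keep the driver's rpi-to-ndlp
 * lookup in step with it for later lookups and unreg_login.
 */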
1095 /* Only if we are not a fabric nport do we issue PRLI */
1096 if (!(ndlp->nlp_type & NLP_FABRIC)) {
1097 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1098 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1099 lpfc_issue_els_prli(phba, ndlp, 0);
1100 } else {
1101 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1102 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1104 return (ndlp->nlp_state);
1107 static uint32_t
1108 lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
1109 struct lpfc_nodelist * ndlp, void *arg,
1110 uint32_t evt)
1112 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1113 return (NLP_STE_FREED_NODE);
1116 static uint32_t
1117 lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
1118 struct lpfc_nodelist * ndlp, void *arg,
1119 uint32_t evt)
1121 ndlp->nlp_state = NLP_STE_NPR_NODE;
1122 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1123 spin_lock_irq(phba->host->host_lock);
1124 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1125 spin_unlock_irq(phba->host->host_lock);
1126 return (ndlp->nlp_state);
1129 static uint32_t
1130 lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
1131 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1133 struct lpfc_iocbq *cmdiocb;
1135 cmdiocb = (struct lpfc_iocbq *) arg;
1137 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1138 return (ndlp->nlp_state);
1141 static uint32_t
1142 lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
1143 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1145 struct lpfc_iocbq *cmdiocb;
1147 cmdiocb = (struct lpfc_iocbq *) arg;
1149 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1150 return (ndlp->nlp_state);
1153 static uint32_t
1154 lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
1155 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1157 struct lpfc_iocbq *cmdiocb;
1159 cmdiocb = (struct lpfc_iocbq *) arg;
1161 /* Software abort outstanding PRLI before sending acc */
1162 lpfc_els_abort(phba, ndlp, 1);
1164 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1165 return (ndlp->nlp_state);
1168 static uint32_t
1169 lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
1170 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1172 struct lpfc_iocbq *cmdiocb;
1174 cmdiocb = (struct lpfc_iocbq *) arg;
1176 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1177 return (ndlp->nlp_state);
1180 /* This routine is invoked when we rcv a PRLO request from an nport
1181 * we are logged into. We should send back a PRLO rsp setting the
1182 * appropriate bits.
1183 * NEXT STATE = PRLI_ISSUE
1184 */
1185 static uint32_t
1186 lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
1187 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1189 struct lpfc_iocbq *cmdiocb;
1191 cmdiocb = (struct lpfc_iocbq *) arg;
1192 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1193 return (ndlp->nlp_state);
1196 static uint32_t
1197 lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1198 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1200 struct lpfc_iocbq *cmdiocb, *rspiocb;
1201 IOCB_t *irsp;
1202 PRLI *npr;
1204 cmdiocb = (struct lpfc_iocbq *) arg;
1205 rspiocb = cmdiocb->context_un.rsp_iocb;
1206 npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1208 irsp = &rspiocb->iocb;
1209 if (irsp->ulpStatus) {
1210 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1211 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1212 return (ndlp->nlp_state);
1215 /* Check out PRLI rsp */
1216 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1217 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1218 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1219 (npr->prliType == PRLI_FCP_TYPE)) {
1220 if (npr->initiatorFunc)
1221 ndlp->nlp_type |= NLP_FCP_INITIATOR;
1222 if (npr->targetFunc)
1223 ndlp->nlp_type |= NLP_FCP_TARGET;
1224 if (npr->Retry)
1225 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1228 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
1229 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
1230 return (ndlp->nlp_state);
1233 /*! lpfc_device_rm_prli_issue
1234 *
1235 * \pre
1236 * \post
1237 * \param phba
1238 * \param ndlp
1239 * \param arg
1240 * \param evt
1241 * \return uint32_t
1242 *
1243 * \b Description:
1244 * This routine is invoked when we receive a request to remove an nport we
1245 * are in the process of PRLIing. We should software abort the outstanding
1246 * PRLI, unreg the login, and send a logout. We will change node state to
1247 * UNUSED_NODE and put it on the plogi list so it can be freed when LOGO completes.
1248 */
1250 static uint32_t
1251 lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
1252 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1254 /* software abort outstanding PRLI */
1255 lpfc_els_abort(phba, ndlp, 1);
1257 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1258 return (NLP_STE_FREED_NODE);
1262 /*! lpfc_device_recov_prli_issue
1263 *
1264 * \pre
1265 * \post
1266 * \param phba
1267 * \param ndlp
1268 * \param arg
1269 * \param evt
1270 * \return uint32_t
1271 *
1272 * \b Description:
1273 * The routine is invoked when the state of a device is unknown, like
1274 * during a link down. We should remove the nodelist entry from the
1275 * unmapped list, issue an UNREG_LOGIN, do a software abort of the
1276 * outstanding PRLI command, then free the node entry.
1277 */
1278 static uint32_t
1279 lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
1280 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1282 /* software abort outstanding PRLI */
1283 lpfc_els_abort(phba, ndlp, 1);
1285 ndlp->nlp_state = NLP_STE_NPR_NODE;
1286 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1287 spin_lock_irq(phba->host->host_lock);
1288 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1289 spin_unlock_irq(phba->host->host_lock);
1290 return (ndlp->nlp_state);
1293 static uint32_t
1294 lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
1295 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1297 struct lpfc_iocbq *cmdiocb;
1299 cmdiocb = (struct lpfc_iocbq *) arg;
1301 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1302 return (ndlp->nlp_state);
1305 static uint32_t
1306 lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
1307 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1309 struct lpfc_iocbq *cmdiocb;
1311 cmdiocb = (struct lpfc_iocbq *) arg;
1313 lpfc_rcv_prli(phba, ndlp, cmdiocb);
1314 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1315 return (ndlp->nlp_state);
1318 static uint32_t
1319 lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
1320 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1322 struct lpfc_iocbq *cmdiocb;
1324 cmdiocb = (struct lpfc_iocbq *) arg;
1326 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1327 return (ndlp->nlp_state);
1330 static uint32_t
1331 lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
1332 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1334 struct lpfc_iocbq *cmdiocb;
1336 cmdiocb = (struct lpfc_iocbq *) arg;
1338 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1339 return (ndlp->nlp_state);
1342 static uint32_t
1343 lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
1344 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1346 struct lpfc_iocbq *cmdiocb;
1348 cmdiocb = (struct lpfc_iocbq *) arg;
1350 /* Treat like rcv logo */
1351 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1352 return (ndlp->nlp_state);
1355 static uint32_t
1356 lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
1357 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1359 ndlp->nlp_state = NLP_STE_NPR_NODE;
1360 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1361 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1362 lpfc_disc_set_adisc(phba, ndlp);
1364 return (ndlp->nlp_state);
1367 static uint32_t
1368 lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
1369 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1371 struct lpfc_iocbq *cmdiocb;
1373 cmdiocb = (struct lpfc_iocbq *) arg;
1375 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1376 return (ndlp->nlp_state);
1379 static uint32_t
1380 lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
1381 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1383 struct lpfc_iocbq *cmdiocb;
1385 cmdiocb = (struct lpfc_iocbq *) arg;
1387 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1388 return (ndlp->nlp_state);
1391 static uint32_t
1392 lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
1393 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1395 struct lpfc_iocbq *cmdiocb;
1397 cmdiocb = (struct lpfc_iocbq *) arg;
1399 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1400 return (ndlp->nlp_state);
1403 static uint32_t
1404 lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
1405 struct lpfc_nodelist * ndlp, void *arg,
1406 uint32_t evt)
1408 struct lpfc_iocbq *cmdiocb;
1410 cmdiocb = (struct lpfc_iocbq *) arg;
1412 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1413 return (ndlp->nlp_state);
1416 static uint32_t
1417 lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
1418 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1420 struct lpfc_iocbq *cmdiocb;
1422 cmdiocb = (struct lpfc_iocbq *) arg;
1424 /* flush the target */
1425 spin_lock_irq(phba->host->host_lock);
1426 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1427 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
1428 spin_unlock_irq(phba->host->host_lock);
1430 /* Treat like rcv logo */
1431 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1432 return (ndlp->nlp_state);
1435 static uint32_t
1436 lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
1437 struct lpfc_nodelist * ndlp, void *arg,
1438 uint32_t evt)
1440 ndlp->nlp_state = NLP_STE_NPR_NODE;
1441 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1442 spin_lock_irq(phba->host->host_lock);
1443 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1444 spin_unlock_irq(phba->host->host_lock);
1445 lpfc_disc_set_adisc(phba, ndlp);
1446 return (ndlp->nlp_state);
1449 static uint32_t
1450 lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
1451 struct lpfc_nodelist * ndlp, void *arg,
1452 uint32_t evt)
1454 struct lpfc_iocbq *cmdiocb;
1456 cmdiocb = (struct lpfc_iocbq *) arg;
1458 /* Ignore PLOGI if we have an outstanding LOGO */
1459 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1460 return (ndlp->nlp_state);
1463 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
1464 spin_lock_irq(phba->host->host_lock);
1465 ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
1466 spin_unlock_irq(phba->host->host_lock);
1467 return (ndlp->nlp_state);
1470 /* send PLOGI immediately, move to PLOGI issue state */
1471 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1472 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1473 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1474 lpfc_issue_els_plogi(phba, ndlp, 0);
1476 return (ndlp->nlp_state);
1479 static uint32_t
1480 lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
1481 struct lpfc_nodelist * ndlp, void *arg,
1482 uint32_t evt)
1484 struct lpfc_iocbq *cmdiocb;
1485 struct ls_rjt stat;
1487 cmdiocb = (struct lpfc_iocbq *) arg;
1489 memset(&stat, 0, sizeof (struct ls_rjt));
1490 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1491 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1492 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
1494 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1495 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1496 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1497 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1498 lpfc_issue_els_adisc(phba, ndlp, 0);
1499 } else {
1500 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1501 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1502 lpfc_issue_els_plogi(phba, ndlp, 0);
1505 return (ndlp->nlp_state);
1508 static uint32_t
1509 lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
1510 struct lpfc_nodelist * ndlp, void *arg,
1511 uint32_t evt)
1513 struct lpfc_iocbq *cmdiocb;
1515 cmdiocb = (struct lpfc_iocbq *) arg;
1517 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1518 return (ndlp->nlp_state);
1521 static uint32_t
1522 lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1523 struct lpfc_nodelist * ndlp, void *arg,
1524 uint32_t evt)
1526 struct lpfc_iocbq *cmdiocb;
1528 cmdiocb = (struct lpfc_iocbq *) arg;
1530 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1532 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1533 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1534 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1535 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1536 lpfc_issue_els_adisc(phba, ndlp, 0);
1537 } else {
1538 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1539 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1540 lpfc_issue_els_plogi(phba, ndlp, 0);
1543 return (ndlp->nlp_state);
1546 static uint32_t
1547 lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
1548 struct lpfc_nodelist * ndlp, void *arg,
1549 uint32_t evt)
1551 struct lpfc_iocbq *cmdiocb;
1553 cmdiocb = (struct lpfc_iocbq *) arg;
1555 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1557 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1558 if (ndlp->nlp_last_elscmd == (unsigned long)ELS_CMD_PLOGI) {
1559 return (ndlp->nlp_state);
1560 } else {
1561 spin_lock_irq(phba->host->host_lock);
1562 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1563 spin_unlock_irq(phba->host->host_lock);
1564 del_timer_sync(&ndlp->nlp_delayfunc);
1565 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1566 list_del_init(&ndlp->els_retry_evt.evt_listp);
1570 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1571 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1572 lpfc_issue_els_plogi(phba, ndlp, 0);
1573 return (ndlp->nlp_state);
1576 static uint32_t
1577 lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
1578 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1580 lpfc_unreg_rpi(phba, ndlp);
1581 /* The RPI was unregistered above; just return the current state */
1582 return (ndlp->nlp_state);
1585 static uint32_t
1586 lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1587 struct lpfc_nodelist * ndlp, void *arg,
1588 uint32_t evt)
1590 LPFC_MBOXQ_t *pmb;
1591 MAILBOX_t *mb;
1593 pmb = (LPFC_MBOXQ_t *) arg;
1594 mb = &pmb->mb;
1596 /* save rpi */
1597 if (ndlp->nlp_rpi != 0)
1598 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1600 ndlp->nlp_rpi = mb->un.varWords[0];
1601 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1603 return (ndlp->nlp_state);
1606 static uint32_t
1607 lpfc_device_rm_npr_node(struct lpfc_hba * phba,
1608 struct lpfc_nodelist * ndlp, void *arg,
1609 uint32_t evt)
1611 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1612 return (NLP_STE_FREED_NODE);
1615 static uint32_t
1616 lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1617 struct lpfc_nodelist * ndlp, void *arg,
1618 uint32_t evt)
1620 spin_lock_irq(phba->host->host_lock);
1621 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1622 spin_unlock_irq(phba->host->host_lock);
1623 return (ndlp->nlp_state);
1627 /* This next section defines the NPort Discovery State Machine */
1629 /* There are 4 different double linked lists nodelist entries can reside on.
1630 * The plogi list and adisc list are used when Link Up discovery or RSCN
1631 * processing is needed. Each list holds the nodes that we will send PLOGI
1632 * or ADISC on. These lists will keep track of what nodes will be affected
1633 * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
1634 * The unmapped_list will contain all nodes that we have successfully logged
1635 * into at the Fibre Channel level. The mapped_list will contain all nodes
1636 * that are mapped FCP targets.
1637 */
1638 /*
1639 * The bind list is a list of undiscovered (potentially non-existent) nodes
1640 * that we have saved binding information on. This information is used when
1641 * nodes transition from the unmapped to the mapped list.
1642 *
1643 * For UNUSED_NODE state, the node has just been allocated.
1644 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
1645 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
1646 * and put on the unmapped list. For ADISC processing, the node is taken off
1647 * the ADISC list and placed on either the mapped or unmapped list (depending
1648 * on its previous state). Once on the unmapped list, a PRLI is issued and the
1649 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
1650 * changed to UNMAPPED_NODE. If the completion indicates a mapped
1651 * node, the node is taken off the unmapped list. The binding list is checked
1652 * for a valid binding, or a binding is automatically assigned. If binding
1653 * assignment is unsuccessful, the node is left on the unmapped list. If
1654 * binding assignment is successful, the associated binding list entry (if
1655 * any) is removed, and the node is placed on the mapped list.
1656 */
1657 /*
1658 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
1659 * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
1660 * expire, all affected nodes will receive a DEVICE_RM event.
1661 */
1662 /*
1663 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
1664 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
1665 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
1666 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
1667 * we will first process the ADISC list. 32 entries are processed initially and
1668 * ADISC is initiated for each one. Completions / Events for each node are
1669 * funnelled thru the state machine. As each node finishes ADISC processing, it
1670 * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
1671 * waiting, and the ADISC list count is identically 0, then we are done. For
1672 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
1673 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
1674 * list. 32 entries are processed initially and PLOGI is initiated for each one.
1675 * Completions / Events for each node are funnelled thru the state machine. As
1676 * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
1677 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
1678 * identically 0, then we are done. We have now completed discovery / RSCN
1679 * handling. Upon completion, ALL nodes should be on either the mapped or
1680 * unmapped lists.
1681 */
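/*
 * lpfc_disc_action[] below is a flat NLP_STE_MAX_STATE x NLP_EVT_MAX_EVENT
 * jump table: the handler for event <evt> in state <state> sits at index
 * (state * NLP_EVT_MAX_EVENT) + evt, which is how lpfc_disc_state_machine()
 * dispatches. For example, a received PLOGI for a node in NLP_STE_NPR_NODE
 * is handled by lpfc_rcv_plogi_npr_node.
 */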
1683 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1684 (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = {
1685 /* Action routine Event Current State */
1686 lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
1687 lpfc_rcv_els_unused_node, /* RCV_PRLI */
1688 lpfc_rcv_logo_unused_node, /* RCV_LOGO */
1689 lpfc_rcv_els_unused_node, /* RCV_ADISC */
1690 lpfc_rcv_els_unused_node, /* RCV_PDISC */
1691 lpfc_rcv_els_unused_node, /* RCV_PRLO */
1692 lpfc_disc_illegal, /* CMPL_PLOGI */
1693 lpfc_disc_illegal, /* CMPL_PRLI */
1694 lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
1695 lpfc_disc_illegal, /* CMPL_ADISC */
1696 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1697 lpfc_device_rm_unused_node, /* DEVICE_RM */
1698 lpfc_disc_illegal, /* DEVICE_RECOVERY */
1700 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
1701 lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
1702 lpfc_rcv_els_plogi_issue, /* RCV_LOGO */
1703 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
1704 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
1705 lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
1706 lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
1707 lpfc_disc_illegal, /* CMPL_PRLI */
1708 lpfc_disc_illegal, /* CMPL_LOGO */
1709 lpfc_disc_illegal, /* CMPL_ADISC */
1710 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1711 lpfc_device_rm_plogi_issue, /* DEVICE_RM */
1712 lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
1714 lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
1715 lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
1716 lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
1717 lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
1718 lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
1719 lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
1720 lpfc_disc_illegal, /* CMPL_PLOGI */
1721 lpfc_disc_illegal, /* CMPL_PRLI */
1722 lpfc_disc_illegal, /* CMPL_LOGO */
1723 lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
1724 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1725 lpfc_device_rm_adisc_issue, /* DEVICE_RM */
1726 lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
1728 lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
1729 lpfc_rcv_prli_reglogin_issue, /* RCV_PLOGI */
1730 lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
1731 lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
1732 lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
1733 lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
1734 lpfc_disc_illegal, /* CMPL_PLOGI */
1735 lpfc_disc_illegal, /* CMPL_PRLI */
1736 lpfc_disc_illegal, /* CMPL_LOGO */
1737 lpfc_disc_illegal, /* CMPL_ADISC */
1738 lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
1739 lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
1740 lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
1742 lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
1743 lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
1744 lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
1745 lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
1746 lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
1747 lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
1748 lpfc_disc_illegal, /* CMPL_PLOGI */
1749 lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
1750 lpfc_disc_illegal, /* CMPL_LOGO */
1751 lpfc_disc_illegal, /* CMPL_ADISC */
1752 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1753 lpfc_device_rm_prli_issue, /* DEVICE_RM */
1754 lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
1756 lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
1757 lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
1758 lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
1759 lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
1760 lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
1761 lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
1762 lpfc_disc_illegal, /* CMPL_PLOGI */
1763 lpfc_disc_illegal, /* CMPL_PRLI */
1764 lpfc_disc_illegal, /* CMPL_LOGO */
1765 lpfc_disc_illegal, /* CMPL_ADISC */
1766 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1767 lpfc_disc_illegal, /* DEVICE_RM */
1768 lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
1770 lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
1771 lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
1772 lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
1773 lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
1774 lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
1775 lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
1776 lpfc_disc_illegal, /* CMPL_PLOGI */
1777 lpfc_disc_illegal, /* CMPL_PRLI */
1778 lpfc_disc_illegal, /* CMPL_LOGO */
1779 lpfc_disc_illegal, /* CMPL_ADISC */
1780 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1781 lpfc_disc_illegal, /* DEVICE_RM */
1782 lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
1784 lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
1785 lpfc_rcv_prli_npr_node, /* RCV_PRLI */
1786 lpfc_rcv_logo_npr_node, /* RCV_LOGO */
1787 lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
1788 lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
1789 lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
1790 lpfc_disc_noop, /* CMPL_PLOGI */
1791 lpfc_disc_noop, /* CMPL_PRLI */
1792 lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
1793 lpfc_disc_noop, /* CMPL_ADISC */
1794 lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
1795 lpfc_device_rm_npr_node, /* DEVICE_RM */
1796 lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
1797 };
1799 int
1800 lpfc_disc_state_machine(struct lpfc_hba * phba,
1801 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1803 uint32_t cur_state, rc;
1804 uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
1805 uint32_t);
1807 ndlp->nlp_disc_refcnt++;
1808 cur_state = ndlp->nlp_state;
1810 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
1811 lpfc_printf_log(phba,
1812 KERN_INFO,
1813 LOG_DISCOVERY,
1814 "%d:0211 DSM in event x%x on NPort x%x in state %d "
1815 "Data: x%x\n",
1816 phba->brd_no,
1817 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
1819 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
1820 rc = (func) (phba, ndlp, arg, evt);
1822 /* DSM out state <rc> on NPort <nlp_DID> */
1823 lpfc_printf_log(phba,
1824 KERN_INFO,
1825 LOG_DISCOVERY,
1826 "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
1827 phba->brd_no,
1828 rc, ndlp->nlp_DID, ndlp->nlp_flag);
1830 ndlp->nlp_disc_refcnt--;
1832 /* Check to see if ndlp removal is deferred */
1833 if ((ndlp->nlp_disc_refcnt == 0)
1834 && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
1835 spin_lock_irq(phba->host->host_lock);
1836 ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
1837 spin_unlock_irq(phba->host->host_lock);
1838 lpfc_nlp_remove(phba, ndlp);
1839 return (NLP_STE_FREED_NODE);
1841 if (rc == NLP_STE_FREED_NODE)
1842 return (NLP_STE_FREED_NODE);
1843 ndlp->nlp_state = rc;
1844 return (rc);
1845 }
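/*
 * Typical usage (illustrative sketch): when the ELS layer receives or
 * completes an ELS command for a node, it feeds the corresponding event
 * into the state machine, e.g.
 *
 *	rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 *
 * A return of NLP_STE_FREED_NODE means the ndlp has been freed and must not
 * be referenced again; any other return value is the node's new state.
 */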