/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                 *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                         *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig               *
 *                                                                   *
 * This program is free software; you can redistribute it and/or    *
 * modify it under the terms of version 2 of the GNU General        *
 * Public License as published by the Free Software Foundation.     *
 * This program is distributed in the hope that it will be useful.  *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING   *
 * included with this package.                                      *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
                          struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
                                  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
                                struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;
/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
 * state is LPFC_VPORT_READY, the request for checking host link attention
 * event will be ignored and a return code shall indicate no host link
 * attention event had happened.
 *
 * Return code
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        uint32_t ha_copy;

        if (vport->port_state >= LPFC_VPORT_READY ||
            phba->link_state == LPFC_LINK_DOWN ||
            phba->sli_rev > LPFC_SLI_REV3)
                return 0;

        /* Read the HBA Host Attention Register */
        if (lpfc_readl(phba->HAregaddr, &ha_copy))
                return 1;

        if (!(ha_copy & HA_LATT))
                return 0;

        /* Pending Link Event during Discovery */
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                         "0237 Pending Link Event during "
                         "Discovery: State x%x\n",
                         phba->pport->port_state);

        /* CLEAR_LA should re-enable link attention events and
         * we should then immediately take a LATT event. The
         * LATT processing should call lpfc_linkdown() which
         * will cleanup any left over in-progress discovery
         * events.
         */
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_ABORT_DISCOVERY;
        spin_unlock_irq(shost->host_lock);

        if (phba->link_state != LPFC_CLEAR_LA)
                lpfc_issue_clear_la(phba, vport);

        return 1;
}
/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
 * passed into the routine for the discovery state machine to issue an
 * Extended Link Service (ELS) command. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the ELS command-specific fields will be set up later by the
 * individual discovery machine routines after calling this routine to
 * allocate and prepare a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
                   uint16_t cmdSize, uint8_t retry,
                   struct lpfc_nodelist *ndlp, uint32_t did,
                   uint32_t elscmd)
{
        struct lpfc_hba  *phba = vport->phba;
        struct lpfc_iocbq *elsiocb;
        struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
        struct ulp_bde64 *bpl;
        IOCB_t *icmd;

        if (!lpfc_is_link_up(phba))
                return NULL;

        /* Allocate buffer for command iocb */
        elsiocb = lpfc_sli_get_iocbq(phba);
        if (elsiocb == NULL)
                return NULL;

        /*
         * If this command is for fabric controller and HBA running
         * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
         */
        if ((did == Fabric_DID) &&
            (phba->hba_flag & HBA_FIP_SUPPORT) &&
            ((elscmd == ELS_CMD_FLOGI) ||
             (elscmd == ELS_CMD_FDISC) ||
             (elscmd == ELS_CMD_LOGO)))
                switch (elscmd) {
                case ELS_CMD_FLOGI:
                        elsiocb->iocb_flag |=
                                ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
                                 & LPFC_FIP_ELS_ID_MASK);
                        break;
                case ELS_CMD_FDISC:
                        elsiocb->iocb_flag |=
                                ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
                                 & LPFC_FIP_ELS_ID_MASK);
                        break;
                case ELS_CMD_LOGO:
                        elsiocb->iocb_flag |=
                                ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
                                 & LPFC_FIP_ELS_ID_MASK);
                        break;
                }
        else
                elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

        icmd = &elsiocb->iocb;

        /* fill in BDEs for command */
        /* Allocate buffer for command payload */
        pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (pcmd)
                pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
        if (!pcmd || !pcmd->virt)
                goto els_iocb_free_pcmb_exit;

        INIT_LIST_HEAD(&pcmd->list);

        /* Allocate buffer for response payload */
        if (expectRsp) {
                prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
                if (prsp)
                        prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
                                                     &prsp->phys);
                if (!prsp || !prsp->virt)
                        goto els_iocb_free_prsp_exit;
                INIT_LIST_HEAD(&prsp->list);
        } else
                prsp = NULL;

        /* Allocate buffer for Buffer ptr list */
        pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (pbuflist)
                pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
                                                 &pbuflist->phys);
        if (!pbuflist || !pbuflist->virt)
                goto els_iocb_free_pbuf_exit;

        INIT_LIST_HEAD(&pbuflist->list);
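        /* The IOCB's BDL points at pbuflist, a buffer pointer list (BPL)
         * holding one BDE for the command payload and, when a response is
         * expected, a second BDE for the response payload.
         */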
        icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
        icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
        icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
        icmd->un.elsreq64.remoteID = did;        /* DID */
        if (expectRsp) {
                icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
                icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
                icmd->ulpTimeout = phba->fc_ratov * 2;
        } else {
                icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
                icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
        }
        icmd->ulpBdeCount = 1;
        icmd->ulpLe = 1;
        icmd->ulpClass = CLASS3;

        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
                icmd->un.elsreq64.myID = vport->fc_myDID;

                /* For ELS_REQUEST64_CR, use the VPI by default */
                icmd->ulpContext = phba->vpi_ids[vport->vpi];
                icmd->ulpCt_h = 0;
                /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
                if (elscmd == ELS_CMD_ECHO)
                        icmd->ulpCt_l = 0; /* context = invalid RPI */
                else
                        icmd->ulpCt_l = 1; /* context = VPI */
        }

        bpl = (struct ulp_bde64 *) pbuflist->virt;
        bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
        bpl->tus.f.bdeSize = cmdSize;
        bpl->tus.f.bdeFlags = 0;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        if (expectRsp) {
                bpl++;
                bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
                bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
                bpl->tus.f.bdeSize = FCELSSIZE;
                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl->tus.w = le32_to_cpu(bpl->tus.w);
        }

        /* prevent preparing iocb with NULL ndlp reference */
        elsiocb->context1 = lpfc_nlp_get(ndlp);
        if (!elsiocb->context1)
                goto els_iocb_free_pbuf_exit;
        elsiocb->context2 = pcmd;
        elsiocb->context3 = pbuflist;
        elsiocb->retry = retry;
        elsiocb->vport = vport;
        elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

        if (prsp)
                list_add(&prsp->list, &pcmd->list);

        if (expectRsp) {
                /* Xmit ELS command <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0116 Xmit ELS command x%x to remote "
                                 "NPORT x%x I/O tag: x%x, port state: x%x\n",
                                 elscmd, did, elsiocb->iotag,
                                 vport->port_state);
        } else {
                /* Xmit ELS response <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0117 Xmit ELS response x%x to remote "
                                 "NPORT x%x I/O tag: x%x, size: x%x\n",
                                 elscmd, ndlp->nlp_DID, elsiocb->iotag,
                                 cmdSize);
        }
        return elsiocb;

els_iocb_free_pbuf_exit:
        if (expectRsp)
                lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
        kfree(pbuflist);

els_iocb_free_prsp_exit:
        lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
        kfree(prsp);

els_iocb_free_pcmb_exit:
        kfree(pcmd);
        lpfc_sli_release_iocbq(phba, elsiocb);
        return NULL;
}
/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
static int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
        struct lpfc_hba  *phba = vport->phba;
        LPFC_MBOXQ_t *mbox;
        struct lpfc_dmabuf *mp;
        struct lpfc_nodelist *ndlp;
        struct serv_parm *sp;
        int rc;
        int err = 0;

        sp = &phba->fc_fabparam;
        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                err = 1;
                goto fail;
        }

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox) {
                err = 2;
                goto fail;
        }

        vport->port_state = LPFC_FABRIC_CFG_LINK;
        lpfc_config_link(phba, mbox);
        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        mbox->vport = vport;

        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                err = 3;
                goto fail_free_mbox;
        }

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox) {
                err = 4;
                goto fail;
        }
        rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
                          ndlp->nlp_rpi);
        if (rc) {
                err = 5;
                goto fail_free_mbox;
        }

        mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
        mbox->vport = vport;
        /* increment the reference count on ndlp to hold reference
         * for the callback routine.
         */
        mbox->context2 = lpfc_nlp_get(ndlp);

        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                err = 6;
                goto fail_issue_reg_login;
        }

        return 0;

fail_issue_reg_login:
        /* decrement the reference count on ndlp just incremented
         * for the failed mbox command.
         */
        lpfc_nlp_put(ndlp);
        mp = (struct lpfc_dmabuf *) mbox->context1;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
fail_free_mbox:
        mempool_free(mbox, phba->mbox_mem_pool);

fail:
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                "0249 Cannot issue Register Fabric login: Err %d\n", err);
        return -ENXIO;
}
/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for FCoE only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
        struct lpfc_hba  *phba = vport->phba;
        LPFC_MBOXQ_t *mboxq;
        struct lpfc_nodelist *ndlp;
        struct serv_parm *sp;
        struct lpfc_dmabuf *dmabuf;
        int rc = 0;

        sp = &phba->fc_fabparam;
        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                rc = -ENODEV;
                goto fail;
        }

        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (!dmabuf) {
                rc = -ENOMEM;
                goto fail;
        }
        dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
        if (!dmabuf->virt) {
                rc = -ENOMEM;
                goto fail_free_dmabuf;
        }

        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq) {
                rc = -ENOMEM;
                goto fail_free_coherent;
        }
        vport->port_state = LPFC_FABRIC_CFG_LINK;
        memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
        lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
        mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
        mboxq->vport = vport;
        mboxq->context1 = dmabuf;
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                rc = -ENXIO;
                goto fail_free_mbox;
        }
        return 0;

fail_free_mbox:
        mempool_free(mboxq, phba->mbox_mem_pool);
fail_free_coherent:
        lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
fail_free_dmabuf:
        kfree(dmabuf);
fail:
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                "0289 Issue Register VFI failed: Err %d\n", rc);
        return rc;
}
/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID / Fabric portname /
 * Fabric nodename has changed in the completion service parameters, else it
 * returns 0. This function also sets a flag in the vport data structure to
 * delay N_Port discovery after the FLOGI/FDISC completion if the Clean
 * address bit in the FLOGI/FDISC response is cleared and the FCID / Fabric
 * portname / Fabric nodename has changed in the completion service
 * parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
                          struct serv_parm *sp)
{
        uint8_t fabric_param_changed = 0;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if ((vport->fc_prevDID != vport->fc_myDID) ||
            memcmp(&vport->fabric_portname, &sp->portName,
                   sizeof(struct lpfc_name)) ||
            memcmp(&vport->fabric_nodename, &sp->nodeName,
                   sizeof(struct lpfc_name)))
                fabric_param_changed = 1;

        /*
         * Word 1 Bit 31 in common service parameter is overloaded.
         * Word 1 Bit 31 in FLOGI request is multiple NPort request
         * Word 1 Bit 31 in FLOGI response is clean address bit
         *
         * If fabric parameter is changed and clean address bit is
         * cleared delay nport discovery if:
         * - vport->fc_prevDID != 0 (not initial discovery) OR
         * - lpfc_delay_discovery module parameter is set.
         */
        if (fabric_param_changed && !sp->cmn.clean_address_bit &&
            (vport->fc_prevDID || lpfc_delay_discovery)) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_DISC_DELAYED;
                spin_unlock_irq(shost->host_lock);
        }

        return fabric_param_changed;
}
/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                           struct serv_parm *sp, IOCB_t *irsp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        struct lpfc_nodelist *np;
        struct lpfc_nodelist *next_np;
        uint8_t fabric_param_changed;

        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_FABRIC;
        spin_unlock_irq(shost->host_lock);
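        /* Cache the fabric's timeout values: E_D_TOV is reported in
         * milliseconds unless edtovResolution is set (then it is in
         * nanoseconds), and R_A_TOV is converted from milliseconds to
         * whole seconds, rounding up.
         */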
        phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
        if (sp->cmn.edtovResolution)    /* E_D_TOV ticks are in nanoseconds */
                phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

        phba->fc_edtovResol = sp->cmn.edtovResolution;
        phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_PUBLIC_LOOP;
                spin_unlock_irq(shost->host_lock);
        }

        vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
        memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
        memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
        ndlp->nlp_class_sup = 0;
        if (sp->cls1.classValid)
                ndlp->nlp_class_sup |= FC_COS_CLASS1;
        if (sp->cls2.classValid)
                ndlp->nlp_class_sup |= FC_COS_CLASS2;
        if (sp->cls3.classValid)
                ndlp->nlp_class_sup |= FC_COS_CLASS3;
        if (sp->cls4.classValid)
                ndlp->nlp_class_sup |= FC_COS_CLASS4;
        ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
                             sp->cmn.bbRcvSizeLsb;

        fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
        memcpy(&vport->fabric_portname, &sp->portName,
               sizeof(struct lpfc_name));
        memcpy(&vport->fabric_nodename, &sp->nodeName,
               sizeof(struct lpfc_name));
        memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
                if (sp->cmn.response_multiple_NPort) {
                        lpfc_printf_vlog(vport, KERN_WARNING,
                                         LOG_ELS | LOG_VPORT,
                                         "1816 FLOGI NPIV supported, "
                                         "response data 0x%x\n",
                                         sp->cmn.response_multiple_NPort);
                        phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
                } else {
                        /* Because we asked f/w for NPIV it still expects us
                         * to call reg_vnpid at least for the physical host */
                        lpfc_printf_vlog(vport, KERN_WARNING,
                                         LOG_ELS | LOG_VPORT,
                                         "1817 Fabric does not support NPIV "
                                         "- configuring single port mode.\n");
                        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
                }
        }

        if (fabric_param_changed &&
            !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

                /* If our NportID changed, we need to ensure all
                 * remaining NPORTs get unreg_login'ed.
                 */
                list_for_each_entry_safe(np, next_np,
                                         &vport->fc_nodes, nlp_listp) {
                        if (!NLP_CHK_NODE_ACT(np))
                                continue;
                        if ((np->nlp_state != NLP_STE_NPR_NODE) ||
                            !(np->nlp_flag & NLP_NPR_ADISC))
                                continue;
                        spin_lock_irq(shost->host_lock);
                        np->nlp_flag &= ~NLP_NPR_ADISC;
                        spin_unlock_irq(shost->host_lock);
                        lpfc_unreg_rpi(vport, np);
                }
                lpfc_cleanup_pending_mbox(vport);

                if (phba->sli_rev == LPFC_SLI_REV4) {
                        lpfc_sli4_unreg_all_rpis(vport);
                        lpfc_mbx_unreg_vpi(vport);
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                        /*
                         * If VPI is unreged, the driver needs to do INIT_VPI
                         * before re-registering.
                         */
                        vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                        spin_unlock_irq(shost->host_lock);
                }
        } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
                   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
                /*
                 * Driver needs to re-reg VPI in order for f/w
                 * to update the MAC address.
                 */
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
                lpfc_register_new_vport(phba, vport, ndlp);
                return 0;
        }

        if (phba->sli_rev < LPFC_SLI_REV4) {
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
                if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
                    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
                        lpfc_register_new_vport(phba, vport, ndlp);
                else
                        lpfc_issue_fabric_reglogin(vport);
        } else {
                ndlp->nlp_type |= NLP_FABRIC;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
                if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
                    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
                        lpfc_start_fdiscs(phba);
                        lpfc_do_scr_ns_plogi(phba, vport);
                } else if (vport->fc_flag & FC_VFI_REGISTERED)
                        lpfc_issue_init_vpi(vport);
                else
                        lpfc_issue_reg_vfi(vport);
        }
        return 0;
}
/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * the N_Port IDs.
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                          struct serv_parm *sp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        LPFC_MBOXQ_t *mbox;
        int rc;

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
        spin_unlock_irq(shost->host_lock);

        phba->fc_edtov = FF_DEF_EDTOV;
        phba->fc_ratov = FF_DEF_RATOV;
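        /* Compare our WWPN with the one returned in the FLOGI payload;
         * the port with the higher name initiates the PLOGI and assigns
         * the point-to-point N_Port IDs (see the function header above).
         */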
        rc = memcmp(&vport->fc_portname, &sp->portName,
                    sizeof(vport->fc_portname));
        if (rc >= 0) {
                /* This side will initiate the PLOGI */
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_PT2PT_PLOGI;
                spin_unlock_irq(shost->host_lock);

                /*
                 * N_Port ID cannot be 0, set ours to LocalID; the other
                 * side will be RemoteID.
                 */

                /* not equal */
                if (rc)
                        vport->fc_myDID = PT2PT_LocalID;

                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!mbox)
                        goto fail;

                lpfc_config_link(phba, mbox);

                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                mbox->vport = vport;
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                if (rc == MBX_NOT_FINISHED) {
                        mempool_free(mbox, phba->mbox_mem_pool);
                        goto fail;
                }

                /* Decrement ndlp reference count indicating that ndlp can be
                 * safely released when other references to it are done.
                 */
                lpfc_nlp_put(ndlp);

                ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
                if (!ndlp) {
                        /*
                         * Cannot find existing Fabric ndlp, so allocate a
                         * new one.
                         */
                        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
                        if (!ndlp)
                                goto fail;
                        lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
                } else if (!NLP_CHK_NODE_ACT(ndlp)) {
                        ndlp = lpfc_enable_node(vport, ndlp,
                                                NLP_STE_UNUSED_NODE);
                        if (!ndlp)
                                goto fail;
                }

                memcpy(&ndlp->nlp_portname, &sp->portName,
                       sizeof(struct lpfc_name));
                memcpy(&ndlp->nlp_nodename, &sp->nodeName,
                       sizeof(struct lpfc_name));
                /* Set state will put ndlp onto node list if not already done */
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
        } else
                /* This side will wait for the PLOGI, decrement ndlp reference
                 * count indicating that ndlp can be released when other
                 * references to it are done.
                 */
                lpfc_nlp_put(ndlp);

        /* If we are pt2pt with another NPort, force NPIV off! */
        phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_PT2PT;
        spin_unlock_irq(shost->host_lock);

        /* Start discovery - this should just do CLEAR_LA */
        lpfc_disc_start(vport);
        return 0;
fail:
        return -ENXIO;
}
/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly reaching the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error is reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                    struct lpfc_iocbq *rspiocb)
{
        struct lpfc_vport *vport = cmdiocb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        IOCB_t *irsp = &rspiocb->iocb;
        struct lpfc_nodelist *ndlp = cmdiocb->context1;
        struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
        struct serv_parm *sp;
        uint16_t fcf_index;
        int rc;

        /* Check to see if link went down during discovery */
        if (lpfc_els_chk_latt(vport)) {
                /* One additional decrement on node reference count to
                 * trigger the release of the node
                 */
                lpfc_nlp_put(ndlp);
                goto out;
        }

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "FLOGI cmpl: status:x%x/x%x state:x%x",
                irsp->ulpStatus, irsp->un.ulpWord[4],
                vport->port_state);

        if (irsp->ulpStatus) {
                /*
                 * In case of FIP mode, perform roundrobin FCF failover
                 * due to new FCF discovery
                 */
                if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
                    (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
                    !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
                      (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
                                        "2611 FLOGI failed on FCF (x%x), "
                                        "status:x%x/x%x, tmo:x%x, perform "
                                        "roundrobin FCF failover\n",
                                        phba->fcf.current_rec.fcf_indx,
                                        irsp->ulpStatus, irsp->un.ulpWord[4],
                                        irsp->ulpTimeout);
                        lpfc_sli4_set_fcf_flogi_fail(phba,
                                        phba->fcf.current_rec.fcf_indx);
                        fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
                        rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
                        if (rc)
                                goto out;
                }

                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
                                 irsp->ulpStatus, irsp->un.ulpWord[4],
                                 irsp->ulpTimeout);

                /* Check for retry */
                if (lpfc_els_retry(phba, cmdiocb, rspiocb))
                        goto out;

                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
                                 irsp->ulpStatus, irsp->un.ulpWord[4],
                                 irsp->ulpTimeout);

                /* FLOGI failed, so there is no fabric */
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
                spin_unlock_irq(shost->host_lock);

                /* If private loop, then allow max outstanding els to be
                 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
                 * alpa map would take too long otherwise.
                 */
                if (phba->alpa_map[0] == 0) {
                        vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
                        if ((phba->sli_rev == LPFC_SLI_REV4) &&
                            (!(vport->fc_flag & FC_VFI_REGISTERED) ||
                             (vport->fc_prevDID != vport->fc_myDID))) {
                                if (vport->fc_flag & FC_VFI_REGISTERED)
                                        lpfc_sli4_unreg_all_rpis(vport);
                                lpfc_issue_reg_vfi(vport);
                                lpfc_nlp_put(ndlp);
                                goto out;
                        }
                }
                goto flogifail;
        }
        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
        vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
        spin_unlock_irq(shost->host_lock);

        /*
         * The FLogI succeeded. Sync the data for the CPU before
         * accessing it.
         */
        prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

        sp = prsp->virt + sizeof(uint32_t);

        /* FLOGI completes successfully */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "0101 FLOGI completes successfully "
                         "Data: x%x x%x x%x x%x\n",
                         irsp->un.ulpWord[4], sp->cmn.e_d_tov,
                         sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);

        if (vport->port_state == LPFC_FLOGI) {
                /*
                 * If Common Service Parameters indicate Nport
                 * we are point to point, if Fport we are Fabric.
                 */
                if (sp->cmn.fPort)
                        rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
                else if (!(phba->hba_flag & HBA_FCOE_MODE))
                        rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
                else {
                        lpfc_printf_vlog(vport, KERN_ERR,
                                LOG_FIP | LOG_ELS,
                                "2831 FLOGI response with cleared Fabric "
                                "bit fcf_index 0x%x "
                                "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
                                "Fabric Name "
                                "%02x%02x%02x%02x%02x%02x%02x%02x\n",
                                phba->fcf.current_rec.fcf_indx,
                                phba->fcf.current_rec.switch_name[0],
                                phba->fcf.current_rec.switch_name[1],
                                phba->fcf.current_rec.switch_name[2],
                                phba->fcf.current_rec.switch_name[3],
                                phba->fcf.current_rec.switch_name[4],
                                phba->fcf.current_rec.switch_name[5],
                                phba->fcf.current_rec.switch_name[6],
                                phba->fcf.current_rec.switch_name[7],
                                phba->fcf.current_rec.fabric_name[0],
                                phba->fcf.current_rec.fabric_name[1],
                                phba->fcf.current_rec.fabric_name[2],
                                phba->fcf.current_rec.fabric_name[3],
                                phba->fcf.current_rec.fabric_name[4],
                                phba->fcf.current_rec.fabric_name[5],
                                phba->fcf.current_rec.fabric_name[6],
                                phba->fcf.current_rec.fabric_name[7]);
                        lpfc_nlp_put(ndlp);
                        spin_lock_irq(&phba->hbalock);
                        phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
                        phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
                        spin_unlock_irq(&phba->hbalock);
                        goto out;
                }
                if (!rc) {
                        /* Mark the FCF discovery process done */
                        if (phba->hba_flag & HBA_FIP_SUPPORT)
                                lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
                                                LOG_ELS,
                                                "2769 FLOGI to FCF (x%x) "
                                                "completed successfully\n",
                                                phba->fcf.current_rec.fcf_indx);
                        spin_lock_irq(&phba->hbalock);
                        phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
                        phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
                        spin_unlock_irq(&phba->hbalock);
                        goto out;
                }
        }

flogifail:
        lpfc_nlp_put(ndlp);

        if (!lpfc_error_lost_link(irsp)) {
                /* FLOGI failed, so just use loop map to make discovery list */
                lpfc_disc_list_loopmap(vport);

                /* Start discovery */
                lpfc_disc_start(vport);
        } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
                    ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
                     (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
                   (phba->link_state != LPFC_CLEAR_LA)) {
                /* If FLOGI failed enable link interrupt. */
                lpfc_issue_clear_la(phba, vport);
        }
out:
        lpfc_els_free_iocb(phba, cmdiocb);
}
/**
 * lpfc_issue_els_flogi - Issue a flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out the FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                     uint8_t retry)
{
        struct lpfc_hba  *phba = vport->phba;
        struct serv_parm *sp;
        IOCB_t *icmd;
        struct lpfc_iocbq *elsiocb;
        struct lpfc_sli_ring *pring;
        uint8_t *pcmd;
        uint16_t cmdsize;
        uint32_t tmo;
        int rc;

        pring = &phba->sli.ring[LPFC_ELS_RING];

        cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
        elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
                                     ndlp->nlp_DID, ELS_CMD_FLOGI);
        if (!elsiocb)
                return 1;

        icmd = &elsiocb->iocb;
        pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

        /* For FLOGI request, remainder of payload is service parameters */
        *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
        pcmd += sizeof(uint32_t);
        memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
        sp = (struct serv_parm *) pcmd;

        /* Setup CSPs accordingly for Fabric */
        sp->cmn.e_d_tov = 0;
        sp->cmn.w2.r_a_tov = 0;
        sp->cls1.classValid = 0;
        sp->cls2.seqDelivery = 1;
        sp->cls3.seqDelivery = 1;
        if (sp->cmn.fcphLow < FC_PH3)
                sp->cmn.fcphLow = FC_PH3;
        if (sp->cmn.fcphHigh < FC_PH3)
                sp->cmn.fcphHigh = FC_PH3;

        if (phba->sli_rev == LPFC_SLI_REV4) {
                if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
                    LPFC_SLI_INTF_IF_TYPE_0) {
                        elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
                        elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
                        /* FLOGI needs to be 3 for WQE FCFI */
                        /* Set the fcfi to the fcfi we registered with */
                        elsiocb->iocb.ulpContext = phba->fcf.fcfi;
                }
        } else {
                if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
                        sp->cmn.request_multiple_Nport = 1;
                        /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
                        icmd->ulpCt_h = 1;
                        icmd->ulpCt_l = 0;
                } else
                        sp->cmn.request_multiple_Nport = 0;
        }

        if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
                icmd->un.elsreq64.myID = 0;
                icmd->un.elsreq64.fl = 1;
        }
1111 phba
->fc_ratov
= LPFC_DISC_FLOGI_TMO
;
1112 lpfc_set_disctmo(vport
);
1113 phba
->fc_ratov
= tmo
;
1115 phba
->fc_stat
.elsXmitFLOGI
++;
1116 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_flogi
;
1118 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
1119 "Issue FLOGI: opt:x%x",
1120 phba
->sli3_options
, 0, 0);
1122 rc
= lpfc_issue_fabric_iocb(phba
, elsiocb
);
1123 if (rc
== IOCB_ERROR
) {
1124 lpfc_els_free_iocb(phba
, elsiocb
);
/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
        struct lpfc_sli_ring *pring;
        struct lpfc_iocbq *iocb, *next_iocb;
        struct lpfc_nodelist *ndlp;
        IOCB_t *icmd;

        /* Abort outstanding I/O on NPort <nlp_DID> */
        lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
                        "0201 Abort outstanding I/O on NPort x%x\n",
                        Fabric_DID);

        pring = &phba->sli.ring[LPFC_ELS_RING];

        /*
         * Check the txcmplq for an iocb that matches the nport the driver is
         * searching for.
         */
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
                icmd = &iocb->iocb;
                if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
                    icmd->un.elsreq64.bdl.ulpIoTag32) {
                        ndlp = (struct lpfc_nodelist *)(iocb->context1);
                        if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
                            (ndlp->nlp_DID == Fabric_DID))
                                lpfc_sli_issue_abort_iotag(phba, pring, iocb);
                }
        }
        spin_unlock_irq(&phba->hbalock);

        return 0;
}
/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp;

        vport->port_state = LPFC_FLOGI;
        lpfc_set_disctmo(vport);

        /* First look for the Fabric ndlp */
        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (!ndlp) {
                /* Cannot find existing Fabric ndlp, so allocate a new one */
                ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
                if (!ndlp)
                        return 0;
                lpfc_nlp_init(vport, ndlp, Fabric_DID);
                /* Set the node type */
                ndlp->nlp_type |= NLP_FABRIC;
                /* Put ndlp onto node list */
                lpfc_enqueue_node(vport, ndlp);
        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
                /* re-setup ndlp without removing from node list */
                ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
                if (!ndlp)
                        return 0;
        }

        if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
                /* This decrement of reference count to node shall kick off
                 * the release of the node.
                 */
                lpfc_nlp_put(ndlp);
                return 0;
        }
        return 1;
}
/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp;

        /* First look for the Fabric ndlp */
        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (!ndlp) {
                /* Cannot find existing Fabric ndlp, so allocate a new one */
                ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
                if (!ndlp)
                        return 0;
                lpfc_nlp_init(vport, ndlp, Fabric_DID);
                /* Put ndlp onto node list */
                lpfc_enqueue_node(vport, ndlp);
        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
                /* re-setup ndlp without removing from node list */
                ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
                if (!ndlp)
                        return 0;
        }

        if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
                /* decrement node reference count to trigger the release of
                 * the node.
                 */
                lpfc_nlp_put(ndlp);
                return 0;
        }
        return 1;
}
/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
        int sentplogi;

        if (vport->num_disc_nodes)
                vport->num_disc_nodes--;

        /* Continue discovery with <num_disc_nodes> PLOGIs to go */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0232 Continue discovery with %d PLOGIs to go "
                         "Data: x%x x%x x%x\n",
                         vport->num_disc_nodes, vport->fc_plogi_cnt,
                         vport->fc_flag, vport->port_state);
        /* Check to see if there are more PLOGIs to be sent */
        if (vport->fc_flag & FC_NLP_MORE)
                /* go thru NPR nodes and issue any remaining ELS PLOGIs */
                sentplogi = lpfc_els_disc_plogi(vport);

        return;
}
/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from the not-matching
 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
 * of the @ndlp. This is because the release of @ndlp is actually to put it
 * into an inactive state on the vport node list and the vport node list
 * management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
                         struct lpfc_nodelist *ndlp)
{
        struct lpfc_vport *vport = ndlp->vport;
        struct lpfc_nodelist *new_ndlp;
        struct lpfc_rport_data *rdata;
        struct fc_rport *rport;
        struct serv_parm *sp;
        uint8_t name[sizeof(struct lpfc_name)];
        uint32_t rc, keepDID = 0;
        int put_node;
        int put_rport;
        struct lpfc_node_rrqs rrq;

        /* Fabric nodes can have the same WWPN so we don't bother searching
         * by WWPN. Just return the ndlp that was given to us.
         */
        if (ndlp->nlp_type & NLP_FABRIC)
                return ndlp;

        sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
        memset(name, 0, sizeof(struct lpfc_name));

        /* Now we find out if the NPort we are logging into, matches the WWPN
         * we have for that ndlp. If not, we have some work to do.
         */
        new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

        if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
                return ndlp;
        memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));

        if (!new_ndlp) {
                rc = memcmp(&ndlp->nlp_portname, name,
                            sizeof(struct lpfc_name));
                if (!rc)
                        return ndlp;
                new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
                if (!new_ndlp)
                        return ndlp;
                lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
        } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
                rc = memcmp(&ndlp->nlp_portname, name,
                            sizeof(struct lpfc_name));
                if (!rc)
                        return ndlp;
                new_ndlp = lpfc_enable_node(vport, new_ndlp,
                                            NLP_STE_UNUSED_NODE);
                if (!new_ndlp)
                        return ndlp;
                keepDID = new_ndlp->nlp_DID;
                if (phba->sli_rev == LPFC_SLI_REV4)
                        memcpy(&rrq.xri_bitmap,
                               &new_ndlp->active_rrqs.xri_bitmap,
                               sizeof(new_ndlp->active_rrqs.xri_bitmap));
        } else {
                keepDID = new_ndlp->nlp_DID;
                if (phba->sli_rev == LPFC_SLI_REV4)
                        memcpy(&rrq.xri_bitmap,
                               &new_ndlp->active_rrqs.xri_bitmap,
                               sizeof(new_ndlp->active_rrqs.xri_bitmap));
        }

        lpfc_unreg_rpi(vport, new_ndlp);
        new_ndlp->nlp_DID = ndlp->nlp_DID;
        new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
        if (phba->sli_rev == LPFC_SLI_REV4)
                memcpy(new_ndlp->active_rrqs.xri_bitmap,
                       &ndlp->active_rrqs.xri_bitmap,
                       sizeof(ndlp->active_rrqs.xri_bitmap));

        if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
                new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

        /* Set state will put new_ndlp on to node list if not already done */
        lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);

        /* Move this back to NPR state */
        if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
                /* The new_ndlp is replacing ndlp totally, so we need
                 * to put ndlp on UNUSED list and try to free it.
                 */

                /* Fix up the rport accordingly */
                rport = ndlp->rport;
                if (rport) {
                        rdata = rport->dd_data;
                        if (rdata->pnode == ndlp) {
                                lpfc_nlp_put(ndlp);
                                ndlp->rport = NULL;
                                rdata->pnode = lpfc_nlp_get(new_ndlp);
                                new_ndlp->rport = rport;
                        }
                        new_ndlp->nlp_type = ndlp->nlp_type;
                }
                /* We shall actually free the ndlp with both nlp_DID and
                 * nlp_portname fields equal to 0 to avoid any ndlp on the
                 * nodelist never to be used.
                 */
                if (ndlp->nlp_DID == 0) {
                        spin_lock_irq(&phba->ndlp_lock);
                        NLP_SET_FREE_REQ(ndlp);
                        spin_unlock_irq(&phba->ndlp_lock);
                }

                /* Two ndlps cannot have the same did on the nodelist */
                ndlp->nlp_DID = keepDID;
                if (phba->sli_rev == LPFC_SLI_REV4)
                        memcpy(&ndlp->active_rrqs.xri_bitmap,
                               &rrq.xri_bitmap,
                               sizeof(ndlp->active_rrqs.xri_bitmap));
                lpfc_drop_node(vport, ndlp);
        } else {
                lpfc_unreg_rpi(vport, ndlp);
                /* Two ndlps cannot have the same did */
                ndlp->nlp_DID = keepDID;
                if (phba->sli_rev == LPFC_SLI_REV4)
                        memcpy(&ndlp->active_rrqs.xri_bitmap,
                               &rrq.xri_bitmap,
                               sizeof(ndlp->active_rrqs.xri_bitmap));
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                /* Since we are swapping the ndlp passed in with the new one
                 * and the did has already been swapped, copy over the
                 * state and names.
                 */
                memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
                       sizeof(struct lpfc_name));
                memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
                       sizeof(struct lpfc_name));
                new_ndlp->nlp_state = ndlp->nlp_state;
                /* Fix up the rport accordingly */
                rport = ndlp->rport;
                if (rport) {
                        rdata = rport->dd_data;
                        put_node = rdata->pnode != NULL;
                        put_rport = ndlp->rport != NULL;
                        rdata->pnode = NULL;
                        ndlp->rport = NULL;
                        if (put_node)
                                lpfc_nlp_put(ndlp);
                        if (put_rport)
                                put_device(&rport->dev);
                }
        }
        return new_ndlp;
}
/**
 * lpfc_end_rscn - Check and handle more rscn for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether more Registration State Change
 * Notifications (RSCNs) came in while the discovery state machine was in
 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
 * handling the RSCNs.
 **/
void
lpfc_end_rscn(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (vport->fc_flag & FC_RSCN_MODE) {
                /*
                 * Check to see if more RSCNs came in while we were
                 * processing this one.
                 */
                if (vport->fc_rscn_id_cnt ||
                    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
                        lpfc_els_handle_rscn(vport);
                else {
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag &= ~FC_RSCN_MODE;
                        spin_unlock_irq(shost->host_lock);
                }
        }
}
/**
 * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine will call the clear rrq function to free the rrq and
 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
 * exist then the clear_rrq is still called because the rrq needs to
 * be freed.
 **/
static void
lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                  struct lpfc_iocbq *rspiocb)
{
        struct lpfc_vport *vport = cmdiocb->vport;
        IOCB_t *irsp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_node_rrq *rrq;

        /* we pass cmdiocb to state machine which needs rspiocb as well */
        rrq = cmdiocb->context_un.rrq;
        cmdiocb->context_un.rsp_iocb = rspiocb;

        irsp = &rspiocb->iocb;
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "RRQ cmpl: status:x%x/x%x did:x%x",
                irsp->ulpStatus, irsp->un.ulpWord[4],
                irsp->un.elsreq64.remoteID);

        ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "2882 RRQ completes to NPort x%x "
                                 "with no ndlp. Data: x%x x%x x%x\n",
                                 irsp->un.elsreq64.remoteID,
                                 irsp->ulpStatus, irsp->un.ulpWord[4],
                                 irsp->ulpTimeout);
                goto out;
        }

        /* rrq completes to NPort <nlp_DID> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "2880 RRQ completes to NPort x%x "
                         "Data: x%x x%x x%x x%x x%x\n",
                         ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
                         irsp->ulpTimeout, rrq->xritag, rrq->rxid);

        if (irsp->ulpStatus) {
                /* Check for retry */
                /* RRQ failed Don't print the vport to vport rjts */
                if (irsp->ulpStatus != IOSTAT_LS_RJT ||
                    (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
                     ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
                    (phba)->pport->cfg_log_verbose & LOG_ELS)
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
                                 ndlp->nlp_DID, irsp->ulpStatus,
                                 irsp->un.ulpWord[4]);
        }
out:
        if (rrq)
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        lpfc_els_free_iocb(phba, cmdiocb);
        return;
}
/**
 * lpfc_cmpl_els_plogi - Completion callback function for plogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for issuing the Port
 * Login (PLOGI) command. For PLOGI completion, there must be an active
 * ndlp on the vport node list that matches the remote node ID from the
 * PLOGI response IOCB. If such an ndlp does not exist, the PLOGI is simply
 * ignored and the command IOCB released. The PLOGI response IOCB status is
 * checked for error conditions. If there is error status reported, PLOGI
 * retry shall be attempted by invoking the lpfc_els_retry() routine.
 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
 * (DSM) is set for this PLOGI completion. Finally, it checks whether
 * there are additional N_Port nodes with the vport that need to perform
 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue
 * additional PLOGIs.
 **/
static void
lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                    struct lpfc_iocbq *rspiocb)
{
        struct lpfc_vport *vport = cmdiocb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        IOCB_t *irsp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_dmabuf *prsp;
        int disc, rc, did, type;

        /* we pass cmdiocb to state machine which needs rspiocb as well */
        cmdiocb->context_un.rsp_iocb = rspiocb;

        irsp = &rspiocb->iocb;
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "PLOGI cmpl: status:x%x/x%x did:x%x",
                irsp->ulpStatus, irsp->un.ulpWord[4],
                irsp->un.elsreq64.remoteID);

        ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "0136 PLOGI completes to NPort x%x "
                                 "with no ndlp. Data: x%x x%x x%x\n",
                                 irsp->un.elsreq64.remoteID,
                                 irsp->ulpStatus, irsp->un.ulpWord[4],
                                 irsp->ulpTimeout);
                goto out;
        }

        /* Since ndlp can be freed in the disc state machine, note if this node
         * is being used during discovery.
         */
        spin_lock_irq(shost->host_lock);
        disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
        spin_unlock_irq(shost->host_lock);
        rc = 0;

        /* PLOGI completes to NPort <nlp_DID> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "0102 PLOGI completes to NPort x%x "
                         "Data: x%x x%x x%x x%x x%x\n",
                         ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
                         irsp->ulpTimeout, disc, vport->num_disc_nodes);
        /* Check to see if link went down during discovery */
        if (lpfc_els_chk_latt(vport)) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
                goto out;
        }

        /* ndlp could be freed in DSM, save these values now */
        type = ndlp->nlp_type;
        did = ndlp->nlp_DID;

        if (irsp->ulpStatus) {
                /* Check for retry */
                if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
                        /* ELS command is being retried */
                        if (disc) {
                                spin_lock_irq(shost->host_lock);
                                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                                spin_unlock_irq(shost->host_lock);
                        }
                        goto out;
                }
                /* PLOGI failed Don't print the vport to vport rjts */
                if (irsp->ulpStatus != IOSTAT_LS_RJT ||
                    (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
                     ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
                    (phba)->pport->cfg_log_verbose & LOG_ELS)
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
                                 ndlp->nlp_DID, irsp->ulpStatus,
                                 irsp->un.ulpWord[4]);
                /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
                if (lpfc_error_lost_link(irsp))
                        rc = NLP_STE_FREED_NODE;
                else
                        rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                                     NLP_EVT_CMPL_PLOGI);
        } else {
                /* Good status, call state machine */
                prsp = list_entry(((struct lpfc_dmabuf *)
                                   cmdiocb->context2)->list.next,
                                  struct lpfc_dmabuf, list);
                ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
                rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                             NLP_EVT_CMPL_PLOGI);
        }

        if (disc && vport->num_disc_nodes) {
                /* Check to see if there are more PLOGIs to be sent */
                lpfc_more_plogi(vport);

                if (vport->num_disc_nodes == 0) {
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag &= ~FC_NDISC_ACTIVE;
                        spin_unlock_irq(shost->host_lock);

                        lpfc_can_disctmo(vport);
                        lpfc_end_rscn(vport);
                }
        }

out:
        lpfc_els_free_iocb(phba, cmdiocb);
        return;
}
/**
 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @did: destination port identifier.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Port Login (PLOGI) command to a remote N_Port
 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
 * This routine constructs the proper fields of the PLOGI IOCB and invokes
 * the lpfc_sli_issue_iocb() routine to send out the PLOGI ELS command.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the PLOGI ELS command.
 *
 * Return code
 *   0 - Successfully issued a plogi for @vport
 *   1 - failed to issue a plogi for @vport
 **/
int
lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
        struct lpfc_hba  *phba = vport->phba;
        struct serv_parm *sp;
        IOCB_t *icmd;
        struct lpfc_nodelist *ndlp;
        struct lpfc_iocbq *elsiocb;
        struct lpfc_sli *psli;
        uint8_t *pcmd;
        uint16_t cmdsize;
        int ret;

        psli = &phba->sli;

        ndlp = lpfc_findnode_did(vport, did);
        if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
                ndlp = NULL;

        /* If ndlp is not NULL, we will bump the reference count on it */
        cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
        elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
                                     ELS_CMD_PLOGI);
        if (!elsiocb)
                return 1;

        icmd = &elsiocb->iocb;
        pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

        /* For PLOGI request, remainder of payload is service parameters */
        *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
        pcmd += sizeof(uint32_t);
        memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
        sp = (struct serv_parm *) pcmd;

        /*
         * If we are an N-port connected to a Fabric, fix up the parameters
         * so logins to devices on remote loops work.
         */
        if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
                sp->cmn.altBbCredit = 1;

        if (sp->cmn.fcphLow < FC_PH_4_3)
                sp->cmn.fcphLow = FC_PH_4_3;

        if (sp->cmn.fcphHigh < FC_PH3)
                sp->cmn.fcphHigh = FC_PH3;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Issue PLOGI: did:x%x",
                did, 0, 0);

        phba->fc_stat.elsXmitPLOGI++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
        ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);

        if (ret == IOCB_ERROR) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
        return 0;
}
/**
 * lpfc_cmpl_els_prli - Completion callback function for prli
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for a Process Login
 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
 * status. If there is error status reported, PRLI retry shall be attempted
 * by invoking the lpfc_els_retry() routine. Otherwise, the state
 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
 * ndlp to mark the PRLI completion.
 **/
static void
lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp;
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->context_un.rsp_iocb = rspiocb;

	irsp = &(rspiocb->iocb);
	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_PRLI_SND;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"PRLI cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		ndlp->nlp_DID);
	/* PRLI completes to NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0103 PRLI completes to NPort x%x "
			 "Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
			 irsp->ulpTimeout, vport->num_disc_nodes);

	vport->fc_prli_sent--;
	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport))
		goto out;

	if (irsp->ulpStatus) {
		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
			/* ELS command is being retried */
			goto out;
		}
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
				 ndlp->nlp_DID, irsp->ulpStatus,
				 irsp->un.ulpWord[4]);
		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
		if (lpfc_error_lost_link(irsp))
			goto out;
		else
			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
						NLP_EVT_CMPL_PRLI);
	} else
		/* Good status, call state machine */
		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
					NLP_EVT_CMPL_PRLI);
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
/**
 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Process Login (PRLI) ELS command for the
 * @vport. The PRLI service parameters are set up in the payload of the
 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
 * is put to the IOCB completion callback func field before invoking the
 * routine lpfc_sli_issue_iocb() to send out PRLI command.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the PRLI ELS command.
 *
 * Return code
 *   0 - successfully issued prli iocb command for @vport
 *   1 - failed to issue prli iocb command for @vport
 **/
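/*
 * Illustrative call pattern (editorial sketch, not part of the driver
 * source): once a node completes registration login, a caller would
 * normally move it to PRLI_ISSUE and then send the PRLI, e.g.
 *
 *	ndlp->nlp_prev_state = ndlp->nlp_state;
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
 *	lpfc_issue_els_prli(vport, ndlp, 0);
 *
 * On success the routine below increments vport->fc_prli_sent and arms
 * the lpfc_cmpl_els_prli() completion shown above.
 */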
int
lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		    uint8_t retry)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	PRLI *npr;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize;

	cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_PRLI);
	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For PRLI request, remainder of payload is service parameters */
	memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
	*((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
	pcmd += sizeof(uint32_t);

	/* For PRLI, remainder of payload is PRLI parameter page */
	npr = (PRLI *) pcmd;
	/*
	 * If our firmware version is 3.20 or later,
	 * set the following bits for FC-TAPE support.
	 */
	if (phba->vpd.rev.feaLevelHigh >= 0x02) {
		npr->ConfmComplAllowed = 1;
		npr->TaskRetryIdReq = 1;
	}
	npr->estabImagePair = 1;
	npr->readXferRdyDis = 1;

	/* For FCP support */
	npr->prliType = PRLI_FCP_TYPE;
	npr->initiatorFunc = 1;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue PRLI: did:x%x",
		ndlp->nlp_DID, 0, 0);

	phba->fc_stat.elsXmitPRLI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_PRLI_SND;
	spin_unlock_irq(shost->host_lock);
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
	    IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_PRLI_SND;
		spin_unlock_irq(shost->host_lock);
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	vport->fc_prli_sent++;
	return 0;
}
/**
 * lpfc_rscn_disc - Perform rscn discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine performs Registration State Change Notification (RSCN)
 * discovery for a @vport. If the @vport's node port recovery count is not
 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
 * the nodes that need recovery. If none of the PLOGIs were needed through
 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
 * invoked to check for and handle any additional RSCNs that arrived while
 * the current ones were being processed.
 **/
static void
lpfc_rscn_disc(struct lpfc_vport *vport)
{
	lpfc_can_disctmo(vport);

	/* RSCN discovery */
	/* go thru NPR nodes and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		if (lpfc_els_disc_plogi(vport))
			return;

	lpfc_end_rscn(vport);
}
/**
 * lpfc_adisc_done - Complete the adisc phase of discovery
 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
 *
 * This function is called when the final ADISC is completed during discovery.
 * This function handles clearing link attention or issuing reg_vpi depending
 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
 * discovery.
 * This function is called with no locks held.
 **/
static void
lpfc_adisc_done(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/*
	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
	 * and continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}
	/*
	 * For SLI2, we need to set port_state to READY
	 * and continue discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);
		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR list, issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);
			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
				lpfc_end_rscn(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else
		lpfc_rscn_disc(vport);
}
/**
 * lpfc_more_adisc - Issue more adisc as needed
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine determines whether there are more ndlps on a @vport's
 * node list that need to have Address Discover (ADISC) issued. If so, it
 * will invoke the lpfc_els_disc_adisc() routine to issue ADISC on the
 * @vport's remaining nodes which need to have ADISC sent.
 **/
void
lpfc_more_adisc(struct lpfc_vport *vport)
{
	int sentadisc;

	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;
	/* Continue discovery with <num_disc_nodes> ADISCs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0210 Continue discovery with %d ADISCs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_adisc_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more ADISCs to be sent */
	if (vport->fc_flag & FC_NLP_MORE) {
		lpfc_set_disctmo(vport);
		/* go thru NPR nodes and issue any remaining ELS ADISCs */
		sentadisc = lpfc_els_disc_adisc(vport);
	}
	if (!vport->num_disc_nodes)
		lpfc_adisc_done(vport);
	return;
}
/**
 * lpfc_cmpl_els_adisc - Completion callback function for adisc
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion function for issuing the Address Discover
 * (ADISC) command. It first checks to see whether link went down during
 * the discovery process. If so, the node will be marked as node port
 * recovery for issuing discover IOCB by the link attention handler and
 * exit. Otherwise, the response status is checked. If error was reported
 * in the response status, the ADISC command shall be retried by invoking
 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
 * the response status, the state machine is invoked to set transition
 * with respect to NLP_EVT_CMPL_ADISC event.
 **/
static void
lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp;
	struct lpfc_nodelist *ndlp;
	int  disc;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->context_un.rsp_iocb = rspiocb;

	irsp = &(rspiocb->iocb);
	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"ADISC cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		ndlp->nlp_DID);

	/* Since ndlp can be freed in the disc state machine, note if this node
	 * is being used during discovery.
	 */
	spin_lock_irq(shost->host_lock);
	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	/* ADISC completes to NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0104 ADISC completes to NPort x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		goto out;
	}

	if (irsp->ulpStatus) {
		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
			/* ELS command is being retried */
			if (disc) {
				spin_lock_irq(shost->host_lock);
				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
				spin_unlock_irq(shost->host_lock);
				lpfc_set_disctmo(vport);
			}
			goto out;
		}
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
				 ndlp->nlp_DID, irsp->ulpStatus,
				 irsp->un.ulpWord[4]);
		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
		if (!lpfc_error_lost_link(irsp))
			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
						NLP_EVT_CMPL_ADISC);
	} else
		/* Good status, call state machine */
		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
					NLP_EVT_CMPL_ADISC);

	/* Check to see if there are more ADISCs to be sent */
	if (disc && vport->num_disc_nodes)
		lpfc_more_adisc(vport);
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
/**
 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
 * @vport: pointer to a virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues an Address Discover (ADISC) for an @ndlp on a
 * @vport. It prepares the payload of the ADISC ELS command, updates the
 * state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
 * to issue the ADISC ELS command.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the ADISC ELS command.
 *
 * Return code
 *   0 - successfully issued adisc
 *   1 - failed to issue adisc
 **/
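/*
 * Illustrative call pattern (editorial sketch, not part of the driver
 * source): ADISC is normally driven per NPR node during address
 * re-validation, e.g.
 *
 *	ndlp->nlp_prev_state = ndlp->nlp_state;
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 *	lpfc_issue_els_adisc(vport, ndlp, 0);
 *
 * The NLP_ADISC_SND flag set in the routine below is cleared again in
 * lpfc_cmpl_els_adisc() or on an immediate issue failure.
 */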
int
lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	ADISC *ap;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize;

	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ADISC);
	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For ADISC request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
	pcmd += sizeof(uint32_t);

	/* Fill in ADISC payload */
	ap = (ADISC *) pcmd;
	ap->hardAL_PA = phba->fc_pref_ALPA;
	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
	ap->DID = be32_to_cpu(vport->fc_myDID);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue ADISC: did:x%x",
		ndlp->nlp_DID, 0, 0);

	phba->fc_stat.elsXmitADISC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_ADISC_SND;
	spin_unlock_irq(shost->host_lock);
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
	    IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_ADISC_SND;
		spin_unlock_irq(shost->host_lock);
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
/**
 * lpfc_cmpl_els_logo - Completion callback function for logo
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion function for issuing the ELS Logout (LOGO)
 * command. If no error status was reported from the LOGO response, the
 * state machine of the associated ndlp shall be invoked for transition with
 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
 **/
static void
lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_vport *vport = ndlp->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp;
	struct lpfc_sli *psli;
	struct lpfcMboxq *mbox;

	psli = &phba->sli;
	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->context_un.rsp_iocb = rspiocb;

	irsp = &(rspiocb->iocb);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_LOGO_SND;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"LOGO cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		ndlp->nlp_DID);
	/* LOGO completes to NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0105 LOGO completes to NPort x%x "
			 "Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
			 irsp->ulpTimeout, vport->num_disc_nodes);
	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport))
		goto out;

	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
		/* NLP_EVT_DEVICE_RM should unregister the RPI
		 * which should abort all outstanding IOs.
		 */
		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
					NLP_EVT_DEVICE_RM);
		goto out;
	}

	if (irsp->ulpStatus) {
		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			/* ELS command is being retried */
			goto out;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
				 ndlp->nlp_DID, irsp->ulpStatus,
				 irsp->un.ulpWord[4]);
		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
		if (lpfc_error_lost_link(irsp))
			goto out;
		else
			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
						NLP_EVT_CMPL_LOGO);
	} else
		/* Good status, call state machine.
		 * This will unregister the rpi if needed.
		 */
		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
					NLP_EVT_CMPL_LOGO);
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
	if ((vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
		phba->pport->fc_myDID = 0;
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			lpfc_config_link(phba, mbox);
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mbox->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
			    MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
	}
	return;
}
/**
 * lpfc_issue_els_logo - Issue a logo to a node on a vport
 * @vport: pointer to a virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine constructs and issues an ELS Logout (LOGO) iocb command
 * to a remote node, referred by an @ndlp on a @vport. It constructs the
 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the LOGO ELS command.
 *
 * Return code
 *   0 - successfully issued logo
 *   1 - failed to issue logo
 **/
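/*
 * Illustrative call pattern (editorial sketch, not part of the driver
 * source): a caller tearing down a login would move the node to NPR and
 * then send the LOGO, e.g.
 *
 *	ndlp->nlp_prev_state = ndlp->nlp_state;
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 *	lpfc_issue_els_logo(vport, ndlp, 0);
 *
 * The routine below returns immediately if NLP_LOGO_SND is already set,
 * so duplicate LOGOs to the same node are suppressed.
 */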
int
lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		    uint8_t retry)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize;
	int rc;

	spin_lock_irq(shost->host_lock);
	if (ndlp->nlp_flag & NLP_LOGO_SND) {
		spin_unlock_irq(shost->host_lock);
		return 0;
	}
	spin_unlock_irq(shost->host_lock);

	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_LOGO);
	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
	pcmd += sizeof(uint32_t);

	/* Fill in LOGO payload */
	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue LOGO: did:x%x",
		ndlp->nlp_DID, 0, 0);

	phba->fc_stat.elsXmitLOGO++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_SND;
	spin_unlock_irq(shost->host_lock);
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);

	if (rc == IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_LOGO_SND;
		spin_unlock_irq(shost->host_lock);
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
/**
 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is a generic completion callback function for ELS commands.
 * Specifically, it is the callback function which does not need to perform
 * any command specific operations. It is currently used by the ELS command
 * issuing routines for the ELS State Change Request (SCR),
 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
 * certain debug loggings, this callback function simply invokes the
 * lpfc_els_chk_latt() routine to check whether link went down during the
 * discovery process.
 **/
static void
lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp;

	irsp = &rspiocb->iocb;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"ELS cmd cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		irsp->un.elsreq64.remoteID);
	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
			 irsp->ulpIoTag, irsp->ulpStatus,
			 irsp->un.ulpWord[4], irsp->ulpTimeout);
	/* Check to see if link went down during discovery */
	lpfc_els_chk_latt(vport);
	lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
/**
 * lpfc_issue_els_scr - Issue a scr to a node on a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @nportid: N_Port identifier to the remote node.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a State Change Request (SCR) to a fabric node
 * on a @vport. The remote node @nportid is passed into the function. It
 * first searches the @vport node list to find the matching ndlp. If no such
 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
 * routine is invoked to send the SCR IOCB.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the SCR ELS command.
 *
 * Return code
 *   0 - Successfully issued scr command
 *   1 - Failed to issue scr command
 **/
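/*
 * Illustrative call pattern (editorial sketch, not part of the driver
 * source): SCR is sent to the Fabric Controller well-known address once
 * fabric login has completed, e.g.
 *
 *	lpfc_issue_els_scr(vport, SCR_DID, 0);
 *
 * SCR_DID here is assumed to be the Fabric Controller well-known DID
 * used elsewhere in the driver; the routine below allocates a temporary
 * ndlp for it if one does not already exist.
 */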
int
lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli *psli;
	uint8_t *pcmd;
	uint16_t cmdsize;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	cmdsize = (sizeof(uint32_t) + sizeof(SCR));

	ndlp = lpfc_findnode_did(vport, nportid);
	if (!ndlp) {
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 1;
		lpfc_nlp_init(vport, ndlp, nportid);
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 1;
	}

	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_SCR);

	if (!elsiocb) {
		/* This will trigger the release of the node just
		 * allocated
		 */
		lpfc_nlp_put(ndlp);
		return 1;
	}

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
	pcmd += sizeof(uint32_t);

	/* For SCR, remainder of payload is SCR parameter page */
	memset(pcmd, 0, sizeof(SCR));
	((SCR *) pcmd)->Function = SCR_FUNC_FULL;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue SCR: did:x%x",
		ndlp->nlp_DID, 0, 0);

	phba->fc_stat.elsXmitSCR++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
	    IOCB_ERROR) {
		/* The additional lpfc_nlp_put will cause the following
		 * lpfc_els_free_iocb routine to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	/* This will cause the callback-function lpfc_cmpl_els_cmd to
	 * trigger the release of node.
	 */
	lpfc_nlp_put(ndlp);
	return 0;
}
/**
 * lpfc_issue_els_farpr - Issue a farp to a node on a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @nportid: N_Port identifier to the remote node.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fibre Channel Address Resolution Response
 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
 * is passed into the function. It first searches the @vport node list to find
 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FARPR ELS command.
 *
 * Return code
 *   0 - Successfully issued farpr command
 *   1 - Failed to issue farpr command
 **/
static int
lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli *psli;
	FARP *fp;
	uint8_t *pcmd;
	uint32_t *lp;
	uint16_t cmdsize;
	struct lpfc_nodelist *ondlp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	cmdsize = (sizeof(uint32_t) + sizeof(FARP));

	ndlp = lpfc_findnode_did(vport, nportid);
	if (!ndlp) {
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 1;
		lpfc_nlp_init(vport, ndlp, nportid);
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 1;
	}

	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_RNID);
	if (!elsiocb) {
		/* This will trigger the release of the node just
		 * allocated
		 */
		lpfc_nlp_put(ndlp);
		return 1;
	}

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
	pcmd += sizeof(uint32_t);

	/* Fill in FARPR payload */
	fp = (FARP *) (pcmd);
	memset(fp, 0, sizeof(FARP));
	lp = (uint32_t *) pcmd;
	*lp++ = be32_to_cpu(nportid);
	*lp++ = be32_to_cpu(vport->fc_myDID);
	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);

	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
	ondlp = lpfc_findnode_did(vport, nportid);
	if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
		memcpy(&fp->OportName, &ondlp->nlp_portname,
		       sizeof(struct lpfc_name));
		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
		       sizeof(struct lpfc_name));
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FARPR: did:x%x",
		ndlp->nlp_DID, 0, 0);

	phba->fc_stat.elsXmitFARPR++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
	    IOCB_ERROR) {
		/* The additional lpfc_nlp_put will cause the following
		 * lpfc_els_free_iocb routine to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	/* This will cause the callback-function lpfc_cmpl_els_cmd to
	 * trigger the release of the node.
	 */
	lpfc_nlp_put(ndlp);
	return 0;
}
/**
 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
 * @vport: pointer to a host virtual N_Port data structure.
 * @nlp: pointer to a node-list data structure.
 *
 * This routine cancels the timer with a delayed IOCB-command retry for
 * a @vport's @ndlp. It stops the timer for the delayed function retrial and
 * removes the ELS retry event if it is present. In addition, if the
 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
 * commands are sent for the @vport's nodes that require issuing discovery
 * ADISC.
 **/
void
lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_work_evt *evtp;

	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
		return;
	spin_lock_irq(shost->host_lock);
	nlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&nlp->nlp_delayfunc);
	nlp->nlp_last_elscmd = 0;
	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
		list_del_init(&nlp->els_retry_evt.evt_listp);
		/* Decrement nlp reference count held for the delayed retry */
		evtp = &nlp->els_retry_evt;
		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
	}
	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		if (vport->num_disc_nodes) {
			if (vport->port_state < LPFC_VPORT_READY) {
				/* Check if there are more ADISCs to be sent */
				lpfc_more_adisc(vport);
			} else {
				/* Check if there are more PLOGIs to be sent */
				lpfc_more_plogi(vport);
				if (vport->num_disc_nodes == 0) {
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_NDISC_ACTIVE;
					spin_unlock_irq(shost->host_lock);
					lpfc_can_disctmo(vport);
					lpfc_end_rscn(vport);
				}
			}
		}
	}
	return;
}
/**
 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
 * @ptr: holder for the pointer to the timer function associated data (ndlp).
 *
 * This routine is invoked by the ndlp delayed-function timer to check
 * whether there is any pending ELS retry event(s) with the node. If not, it
 * simply returns. Otherwise, if there is at least one ELS delayed event, it
 * adds the delayed events to the HBA work list and invokes the
 * lpfc_worker_wake_up() routine to wake up worker thread to process the
 * event. Note that lpfc_nlp_get() is called before posting the event to
 * the work list to hold reference count of ndlp so that it guarantees the
 * reference to ndlp will still be available when the worker thread gets
 * to the event associated with the ndlp.
 **/
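/*
 * Editorial note (sketch, not part of the driver source): this timer is
 * armed from lpfc_els_retry() below via
 *
 *	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay));
 *
 * with NLP_DELAY_TMO set in ndlp->nlp_flag; the timer callback is assumed
 * to have been bound to lpfc_els_retry_delay() with the ndlp pointer as
 * its argument when the node was initialized.
 */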
void
lpfc_els_retry_delay(unsigned long ptr)
{
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_hba   *phba = vport->phba;
	unsigned long flags;
	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	if (!list_empty(&evtp->evt_listp)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	/* We need to hold the node by incrementing the reference
	 * count until the queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_ELS_RETRY;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return;
}
/**
 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine is the worker-thread handler for processing the @ndlp delayed
 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
 * the last ELS command from the associated ndlp and invokes the proper ELS
 * function according to the delayed ELS command to retry the command.
 **/
void
lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	uint32_t cmd, did, retry;

	spin_lock_irq(shost->host_lock);
	did = ndlp->nlp_DID;
	cmd = ndlp->nlp_last_elscmd;
	ndlp->nlp_last_elscmd = 0;

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		spin_unlock_irq(shost->host_lock);
		return;
	}

	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	/*
	 * If a discovery event readded nlp_delayfunc after timer
	 * firing and before processing the timer, cancel the
	 * nlp_delayfunc.
	 */
	del_timer_sync(&ndlp->nlp_delayfunc);
	retry = ndlp->nlp_retry;
	ndlp->nlp_retry = 0;

	switch (cmd) {
	case ELS_CMD_FLOGI:
		lpfc_issue_els_flogi(vport, ndlp, retry);
		break;
	case ELS_CMD_PLOGI:
		if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		}
		break;
	case ELS_CMD_ADISC:
		if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
		}
		break;
	case ELS_CMD_PRLI:
		if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
		}
		break;
	case ELS_CMD_LOGO:
		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		}
		break;
	case ELS_CMD_FDISC:
		if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
			lpfc_issue_els_fdisc(vport, ndlp, retry);
		break;
	}
	return;
}
/**
 * lpfc_els_retry - Make retry decision on an els command iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine makes a retry decision on an ELS command IOCB, which has
 * failed. The following ELS IOCBs use this function for retrying the command
 * when the previously issued command responded with error status: FLOGI,
 * PLOGI, PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
 * returned error status, it makes the decision whether a retry shall be
 * issued for the command, and whether a retry shall be made immediately or
 * delayed. In the former case, the corresponding ELS command issuing-function
 * is called to retry the command. In the latter case, the ELS command shall
 * be posted to the ndlp delayed event and the delayed function timer set to
 * the ndlp for the delayed command issuing.
 *
 * Return code
 *   0 - No retry of els command is made
 *   1 - Immediate or delayed retry of els command is made
 **/
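/*
 * Editorial note (sketch, not part of the driver source): a completion
 * handler uses this routine as its first-level error policy, e.g.
 *
 *	if (irsp->ulpStatus) {
 *		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *			goto out;	(command is being retried)
 *		...handle the permanent failure...
 *	}
 *
 * which is the pattern followed by lpfc_cmpl_els_plogi(),
 * lpfc_cmpl_els_prli(), lpfc_cmpl_els_adisc() and lpfc_cmpl_els_logo()
 * above.
 */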
static int
lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *elscmd;
	struct ls_rjt stat;
	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
	int logerr = 0;
	uint32_t cmd = 0;
	uint32_t did;

	/* Note: context2 may be 0 for internal driver abort
	 * of delays ELS command.
	 */

	if (pcmd && pcmd->virt) {
		elscmd = (uint32_t *) (pcmd->virt);
		cmd = *elscmd++;
	}

	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
		did = ndlp->nlp_DID;
	else {
		/* We should only hit this case for retrying PLOGI */
		did = irsp->un.elsreq64.remoteID;
		ndlp = lpfc_findnode_did(vport, did);
		if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		    && (cmd != ELS_CMD_PLOGI))
			return 1;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Retry ELS: wd7:x%x wd4:x%x did:x%x",
		*(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);

	switch (irsp->ulpStatus) {
	case IOSTAT_FCP_RSP_ERROR:
		break;
	case IOSTAT_REMOTE_STOP:
		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* This IO was aborted by the target, we don't
			 * know the rxid and because we did not send the
			 * ABTS we cannot generate and RRQ.
			 */
			lpfc_set_rrq_active(phba, ndlp,
					    cmdiocb->sli4_xritag, 0, 0);
		}
		break;
	case IOSTAT_LOCAL_REJECT:
		switch ((irsp->un.ulpWord[4] & 0xff)) {
		case IOERR_LOOP_OPEN_FAILURE:
			if (cmd == ELS_CMD_FLOGI) {
				if (PCI_DEVICE_ID_HORNET ==
					phba->pcidev->device) {
					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
					phba->pport->fc_myDID = 0;
					phba->alpa_map[0] = 0;
					phba->alpa_map[1] = 0;
				}
			}
			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
				delay = 1000;
			retry = 1;
			break;

		case IOERR_ILLEGAL_COMMAND:
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0124 Retry illegal cmd x%x "
					 "retry:x%x delay:x%x\n",
					 cmd, cmdiocb->retry, delay);
			retry = 1;
			/* All command's retry policy */
			maxretry = 8;
			if (cmdiocb->retry > 2)
				delay = 1000;
			break;

		case IOERR_NO_RESOURCES:
			logerr = 1; /* HBA out of resources */
			retry = 1;
			if (cmdiocb->retry > 100)
				delay = 100;
			maxretry = 250;
			break;

		case IOERR_ILLEGAL_FRAME:
			delay = 100;
			retry = 1;
			break;

		case IOERR_SEQUENCE_TIMEOUT:
		case IOERR_INVALID_RPI:
			retry = 1;
			break;
		}
		break;

	case IOSTAT_NPORT_RJT:
	case IOSTAT_FABRIC_RJT:
		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
			retry = 1;
			break;
		}
		break;

	case IOSTAT_NPORT_BSY:
	case IOSTAT_FABRIC_BSY:
		logerr = 1; /* Fabric / Remote NPort out of resources */
		retry = 1;
		break;

	case IOSTAT_LS_RJT:
		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
		/* Added for Vendor specifc support
		 * Just keep retrying for these Rsn / Exp codes
		 */
		switch (stat.un.b.lsRjtRsnCode) {
		case LSRJT_UNABLE_TPC:
			if (stat.un.b.lsRjtRsnCodeExp ==
			    LSEXP_CMD_IN_PROGRESS) {
				if (cmd == ELS_CMD_PLOGI) {
					delay = 1000;
					maxretry = 48;
				}
				retry = 1;
				break;
			}
			if (stat.un.b.lsRjtRsnCodeExp ==
			    LSEXP_CANT_GIVE_DATA) {
				if (cmd == ELS_CMD_PLOGI) {
					delay = 1000;
					maxretry = 48;
				}
				retry = 1;
				break;
			}
			if (cmd == ELS_CMD_PLOGI) {
				delay = 1000;
				maxretry = lpfc_max_els_tries + 1;
				retry = 1;
				break;
			}
			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
			    (cmd == ELS_CMD_FDISC) &&
			    (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
						 "0125 FDISC Failed (x%x). "
						 "Fabric out of resources\n",
						 stat.un.lsRjtError);
				lpfc_vport_set_state(vport,
						     FC_VPORT_NO_FABRIC_RSCS);
			}
			break;

		case LSRJT_LOGICAL_BSY:
			if ((cmd == ELS_CMD_PLOGI) ||
			    (cmd == ELS_CMD_PRLI)) {
				delay = 1000;
				maxretry = 48;
			} else if (cmd == ELS_CMD_FDISC) {
				/* FDISC retry policy */
				maxretry = 48;
				if (cmdiocb->retry >= 32)
					delay = 1000;
			}
			retry = 1;
			break;

		case LSRJT_LOGICAL_ERR:
			/* There are some cases where switches return this
			 * error when they are not ready and should be returning
			 * Logical Busy. We should delay every time.
			 */
			if (cmd == ELS_CMD_FDISC &&
			    stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
				maxretry = 3;
				delay = 1000;
				retry = 1;
				break;
			}
		case LSRJT_PROTOCOL_ERR:
			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
			    (cmd == ELS_CMD_FDISC) &&
			    ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
			     (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
			    ) {
				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
						 "0122 FDISC Failed (x%x). "
						 "Fabric Detected Bad WWN\n",
						 stat.un.lsRjtError);
				lpfc_vport_set_state(vport,
						     FC_VPORT_FABRIC_REJ_WWN);
			}
			break;
		}
		break;

	case IOSTAT_INTERMED_RSP:
		break;
	}

	if (did == FDMI_DID)
		retry = 1;

	if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
	    !lpfc_error_lost_link(irsp)) {
		/* FLOGI retry policy */
		retry = 1;
		/* retry FLOGI forever */
		maxretry = 0;
		if (cmdiocb->retry >= 100)
			delay = 5000;
		else if (cmdiocb->retry >= 32)
			delay = 1000;
	}

	cmdiocb->retry++;
	if (maxretry && (cmdiocb->retry >= maxretry)) {
		phba->fc_stat.elsRetryExceeded++;
		retry = 0;
	}

	if ((vport->load_flag & FC_UNLOADING) != 0)
		retry = 0;

	if (retry) {
		if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
			/* Stop retrying PLOGI and FDISC if in FCF discovery */
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
						 "2849 Stop retry ELS command "
						 "x%x to remote NPORT x%x, "
						 "Data: x%x x%x\n", cmd, did,
						 cmdiocb->retry, delay);
				return 0;
			}
		}

		/* Retry ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0107 Retry ELS command x%x to remote "
				 "NPORT x%x Data: x%x x%x\n",
				 cmd, did, cmdiocb->retry, delay);

		if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
		    ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
		     ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
			/* Don't reset timer for no resources */

			/* If discovery / RSCN timer is running, reset it */
			if (timer_pending(&vport->fc_disctmo) ||
			    (vport->fc_flag & FC_RSCN_MODE))
				lpfc_set_disctmo(vport);
		}

		phba->fc_stat.elsXmitRetry++;
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
			phba->fc_stat.elsDelayRetry++;
			ndlp->nlp_retry = cmdiocb->retry;

			/* delay is specified in milliseconds */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(delay));
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);

			ndlp->nlp_prev_state = ndlp->nlp_state;
			if (cmd == ELS_CMD_PRLI)
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_REG_LOGIN_ISSUE);
			else
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_NPR_NODE);
			ndlp->nlp_last_elscmd = cmd;
			return 1;
		}
		switch (cmd) {
		case ELS_CMD_FLOGI:
			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
			return 1;
		case ELS_CMD_FDISC:
			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
			return 1;
		case ELS_CMD_PLOGI:
			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
				ndlp->nlp_prev_state = ndlp->nlp_state;
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_PLOGI_ISSUE);
			}
			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
			return 1;
		case ELS_CMD_ADISC:
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
			return 1;
		case ELS_CMD_PRLI:
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
			return 1;
		case ELS_CMD_LOGO:
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
			return 1;
		}
	}
	/* No retry ELS command <elsCmd> to remote NPORT <did> */
	if (logerr) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			 "0137 No retry ELS command x%x to remote "
			 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
			 cmd, did, irsp->ulpStatus,
			 irsp->un.ulpWord[4]);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0108 No retry ELS command x%x to remote "
			 "NPORT x%x Retried:%d Error:x%x/%x\n",
			 cmd, did, cmdiocb->retry, irsp->ulpStatus,
			 irsp->un.ulpWord[4]);
	}
	return 0;
}
/**
 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
 * @phba: pointer to lpfc hba data structure.
 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
 *
 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
 * checks to see whether there is a lpfc DMA buffer associated with the
 * response of the command IOCB. If so, it will be released before releasing
 * the lpfc DMA buffer associated with the IOCB itself.
 *
 * Return code
 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
 **/
static int
lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
{
	struct lpfc_dmabuf *buf_ptr;

	/* Free the response before processing the command. */
	if (!list_empty(&buf_ptr1->list)) {
		list_remove_head(&buf_ptr1->list, buf_ptr,
				 struct lpfc_dmabuf,
				 list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}
	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
	kfree(buf_ptr1);
	return 0;
}
/**
 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
 * @phba: pointer to lpfc hba data structure.
 * @buf_ptr: pointer to the lpfc dma buffer data structure.
 *
 * This routine releases the lpfc Direct Memory Access (DMA) buffer
 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
 * pool.
 *
 * Return code
 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
 **/
static int
lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
{
	lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
	kfree(buf_ptr);
	return 0;
}
/**
 * lpfc_els_free_iocb - Free a command iocb and its associated resources
 * @phba: pointer to lpfc hba data structure.
 * @elsiocb: pointer to lpfc els command iocb data structure.
 *
 * This routine frees a command IOCB and its associated resources. The
 * command IOCB data structure contains the reference to various associated
 * resources, these fields must be set to NULL if the associated reference
 * is not released:
 *   context1 - reference to ndlp
 *   context2 - reference to cmd
 *   context2->next - reference to rsp
 *   context3 - reference to bpl
 *
 * It first properly decrements the reference count held on ndlp for the
 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
 * set, it invokes the lpfc_els_free_data() routine to release the Direct
 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
 * adds the DMA buffer the @phba data structure for the delayed release.
 * If reference to the Buffer Pointer List (BPL) is present, the
 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
 * invoked to release the IOCB data structure back to @phba IOCBQ list.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
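/*
 * Editorial note (sketch, not part of the driver source): the common
 * error-path usage after a failed submit is
 *
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 *	    IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *
 * matching the issue routines above; the completion callbacks call the
 * same routine with the command IOCB once the exchange finishes.
 */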
int
lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)elsiocb->context1;
	if (ndlp) {
		if (ndlp->nlp_flag & NLP_DEFER_RM) {
			lpfc_nlp_put(ndlp);

			/* If the ndlp is not being used by another discovery
			 * thread, free it.
			 */
			if (!lpfc_nlp_not_used(ndlp)) {
				/* If ndlp is being used by another discovery
				 * thread, just clear NLP_DEFER_RM
				 */
				ndlp->nlp_flag &= ~NLP_DEFER_RM;
			}
		} else
			lpfc_nlp_put(ndlp);
		elsiocb->context1 = NULL;
	}
	/* context2 = cmd, context2->next = rsp, context3 = bpl */
	if (elsiocb->context2) {
		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
			/* Firmware could still be in progress of DMAing
			 * payload, so don't free data buffer till after
			 * a hbeat.
			 */
			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
			buf_ptr = elsiocb->context2;
			elsiocb->context2 = NULL;

			spin_lock_irq(&phba->hbalock);
			if (!list_empty(&buf_ptr->list)) {
				list_remove_head(&buf_ptr->list,
						 buf_ptr1, struct lpfc_dmabuf,
						 list);
				INIT_LIST_HEAD(&buf_ptr1->list);
				list_add_tail(&buf_ptr1->list,
					      &phba->elsbuf);
			}
			INIT_LIST_HEAD(&buf_ptr->list);
			list_add_tail(&buf_ptr->list, &phba->elsbuf);
			spin_unlock_irq(&phba->hbalock);
		} else {
			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
			lpfc_els_free_data(phba, buf_ptr1);
		}
	}

	if (elsiocb->context3) {
		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
		lpfc_els_free_bpl(phba, buf_ptr);
	}
	lpfc_sli_release_iocbq(phba, elsiocb);
	return 0;
}
/**
 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function to the Logout (LOGO)
 * Accept (ACC) Response ELS command. This routine is invoked to indicate
 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
 * release the ndlp if it has the last reference remaining (reference count
 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
 * field to NULL to inform the following lpfc_els_free_iocb() routine no
 * ndlp reference count needs to be decremented. Otherwise, the ndlp
 * reference use-count shall be decremented by the lpfc_els_free_iocb()
 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
 * IOCB data structure.
 **/
static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp;

	irsp = &rspiocb->iocb;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"ACC LOGO cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
	/* ACC to LOGO completes to NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0109 ACC to LOGO completes to NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);

	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
		/* NPort Recovery mode or node is just allocated */
		if (!lpfc_nlp_not_used(ndlp)) {
			/* If the ndlp is being used by another discovery
			 * thread, just unregister the RPI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		} else {
			/* Indicate the node has already released, should
			 * not reference to it from within lpfc_els_free_iocb.
			 */
			cmdiocb->context1 = NULL;
		}
	}
	lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
/**
 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion callback function for unregister default
 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
 * decrements the ndlp reference count held for this completion callback
 * function. After that, it invokes the lpfc_nlp_not_used() to check
 * whether there is only one reference left on the ndlp. If so, it will
 * perform one more decrement and trigger the release of the ndlp.
 **/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_nlp_put(ndlp);
		/* This is the end of the default RPI cleanup logic for this
		 * ndlp. If no other discovery threads are using this ndlp.
		 * we should free all resources associated with it.
		 */
		lpfc_nlp_not_used(ndlp);
	}

	return;
}
/**
 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for ELS Response IOCB
 * command. In normal case, this callback function just properly sets the
 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
 * field in the command IOCB is not NULL, the referred mailbox command will
 * be send out, and then invokes the lpfc_els_free_iocb() routine to release
 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
 * link down event occurred during the discovery, the lpfc_nlp_not_used()
 * routine shall be invoked trying to release the ndlp if no other threads
 * are currently referring it.
 **/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
	IOCB_t  *irsp;
	uint8_t *pcmd;
	LPFC_MBOXQ_t *mbox = NULL;
	struct lpfc_dmabuf *mp = NULL;
	uint32_t ls_rjt = 0;

	irsp = &rspiocb->iocb;

	if (cmdiocb->context_un.mbox)
		mbox = cmdiocb->context_un.mbox;

	/* First determine if this is a LS_RJT cmpl. Note, this callback
	 * function can have cmdiocb->contest1 (ndlp) field set to NULL.
	 */
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
		/* A LS_RJT associated with Default RPI cleanup has its own
		 * separate code path.
		 */
		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
			ls_rjt = 1;
	}

	/* Check to see if link went down during discovery */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
		if (mbox) {
			mp = (struct lpfc_dmabuf *) mbox->context1;
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mempool_free(mbox, phba->mbox_mem_pool);
		}
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
			if (lpfc_nlp_not_used(ndlp)) {
				ndlp = NULL;
				/* Indicate the node has already released,
				 * should not reference to it from within
				 * the routine lpfc_els_free_iocb.
				 */
				cmdiocb->context1 = NULL;
			}
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"ELS rsp cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		cmdiocb->iocb.un.elsreq64.remoteID);
	/* ELS response tag <ulpIoTag> completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0110 ELS response tag x%x completes "
			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	if (mbox) {
		if ((rspiocb->iocb.ulpStatus == 0)
		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
			lpfc_unreg_rpi(vport, ndlp);
			/* Increment reference count to ndlp to hold the
			 * reference to ndlp for the callback function.
			 */
			mbox->context2 = lpfc_nlp_get(ndlp);
			mbox->vport = vport;
			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			} else {
				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
				ndlp->nlp_prev_state = ndlp->nlp_state;
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_REG_LOGIN_ISSUE);
			}
			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
			    != MBX_NOT_FINISHED)
				goto out;
			else
				/* Decrement the ndlp reference count we
				 * set for this failed mailbox command.
				 */
				lpfc_nlp_put(ndlp);

			/* ELS rsp: Cannot issue reg_login for <NPortid> */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				"0138 ELS rsp: Cannot issue reg_login for x%x "
				"Data: x%x x%x x%x\n",
				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
				ndlp->nlp_rpi);

			if (lpfc_nlp_not_used(ndlp)) {
				ndlp = NULL;
				/* Indicate node has already been released,
				 * should not reference to it from within
				 * the routine lpfc_els_free_iocb.
				 */
				cmdiocb->context1 = NULL;
			}
		} else {
			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
			if (!lpfc_error_lost_link(irsp) &&
			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
				if (lpfc_nlp_not_used(ndlp)) {
					ndlp = NULL;
					/* Indicate node has already been
					 * released, should not reference
					 * to it from within the routine
					 * lpfc_els_free_iocb.
					 */
					cmdiocb->context1 = NULL;
				}
			}
		}
		mp = (struct lpfc_dmabuf *) mbox->context1;
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
	}
out:
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
		spin_unlock_irq(shost->host_lock);

		/* If the node is not being used by another discovery thread,
		 * and we are sending a reject, we are done with it.
		 * Release driver reference count here and free associated
		 * resources.
		 */
		if (ls_rjt)
			if (lpfc_nlp_not_used(ndlp))
				/* Indicate node has already been released,
				 * should not reference to it from within
				 * the routine lpfc_els_free_iocb.
				 */
				cmdiocb->context1 = NULL;
	}

	lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
/**
 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
 * @vport: pointer to a host virtual N_Port data structure.
 * @flag: the els command code to be accepted.
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * This routine prepares and issues an Accept (ACC) response IOCB
 * command. It uses the @flag to properly set up the IOCB field for the
 * specific ACC response command to be issued and invokes the
 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
 * @mbox pointer is passed in, it will be put into the context_un.mbox
 * field of the IOCB for the completion callback function to issue the
 * mailbox command to the HBA later when callback is invoked.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the corresponding response ELS IOCB command.
 *
 * Return code
 *   0 - Successfully issued acc response
 *   1 - Failed to issue acc response
 **/
int
lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
		 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
		 LPFC_MBOXQ_t *mbox)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	IOCB_t *icmd;
	IOCB_t *oldcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli *psli;
	uint8_t *pcmd;
	uint16_t cmdsize;
	int rc;
	ELS_PKT *els_pkt_ptr;

	psli = &phba->sli;
	oldcmd = &oldiocb->iocb;

	switch (flag) {
	case ELS_CMD_ACC:
		cmdsize = sizeof(uint32_t);
		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
		if (!elsiocb) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
			spin_unlock_irq(shost->host_lock);
			return 1;
		}

		icmd = &elsiocb->iocb;
		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
		pcmd += sizeof(uint32_t);

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
			"Issue ACC:       did:x%x flg:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag, 0);
		break;
	case ELS_CMD_PLOGI:
		cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
		if (!elsiocb)
			return 1;

		icmd = &elsiocb->iocb;
		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

		if (mbox)
			elsiocb->context_un.mbox = mbox;

		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
		pcmd += sizeof(uint32_t);
		memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
			"Issue ACC PLOGI: did:x%x flg:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag, 0);
		break;
	case ELS_CMD_PRLO:
		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
		if (!elsiocb)
			return 1;

		icmd = &elsiocb->iocb;
		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
		       sizeof(uint32_t) + sizeof(PRLO));
		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
		els_pkt_ptr = (ELS_PKT *) pcmd;
		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
			"Issue ACC PRLO:  did:x%x flg:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag, 0);
		break;
	default:
		return 1;
	}

	/* Xmit ELS ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
			 elsiocb->iotag, elsiocb->iocb.ulpContext,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		spin_unlock_irq(shost->host_lock);
		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
	} else {
		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	}

	phba->fc_stat.elsXmitACC++;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
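
/*
 * Illustrative sketch (not part of the driver): how the ACC payload built
 * by lpfc_els_rsp_acc() above varies with the @flag argument.  The helper
 * below is hypothetical and only mirrors the three cases handled there;
 * the real layouts come from the serv_parm/PRLO definitions in lpfc_hw.h.
 * It is kept under #if 0 so it never affects the build.
 */
#if 0
static uint16_t example_acc_payload_size(uint32_t flag)
{
	/* Word 0 of every response carries the ELS command code (ACC). */
	uint16_t size = sizeof(uint32_t);

	switch (flag) {
	case ELS_CMD_PLOGI:	/* ACC to PLOGI also carries service params */
		size += sizeof(struct serv_parm);
		break;
	case ELS_CMD_PRLO:	/* ACC to PRLO echoes the PRLO page back */
		size += sizeof(PRLO);
		break;
	default:		/* bare ACC: just the command word */
		break;
	}
	return size;
}
#endif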
/**
 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
 * @vport: pointer to a virtual N_Port data structure.
 * @rejectError: reject response to issue.
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * This routine prepares and issues a Reject (RJT) response IOCB
 * command. If a @mbox pointer is passed in, it will be put into the
 * context_un.mbox field of the IOCB for the completion callback function
 * to issue to the HBA later.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the reject response ELS IOCB command.
 *
 * Return code
 *   0 - Successfully issued reject response
 *   1 - Failed to issue reject response
 **/
int
lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
		    LPFC_MBOXQ_t *mbox)
{
	struct lpfc_hba  *phba = vport->phba;
	IOCB_t *icmd;
	IOCB_t *oldcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli *psli;
	uint8_t *pcmd;
	uint16_t cmdsize;
	int rc;

	psli = &phba->sli;
	cmdsize = 2 * sizeof(uint32_t);
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	oldcmd = &oldiocb->iocb;
	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
	pcmd += sizeof(uint32_t);
	*((uint32_t *) (pcmd)) = rejectError;

	if (mbox)
		elsiocb->context_un.mbox = mbox;

	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0129 Xmit ELS RJT x%x response tag x%x "
			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
			 "rpi x%x\n",
			 rejectError, elsiocb->iotag,
			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);

	phba->fc_stat.elsXmitLSRJT++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);

	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
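
/*
 * Illustrative sketch (not part of the driver): the LS_RJT payload written
 * above is two words, the ELS_CMD_LS_RJT command code followed by a word
 * that packs the reason code and reason-code explanation.  Callers in this
 * file build that word through the ls_rjt bit-field union (stat.un.b); the
 * plain-C packing below is a hypothetical equivalent shown for reference
 * only and is kept under #if 0 so it never affects the build.
 */
#if 0
static uint32_t example_ls_rjt_word(uint8_t reason, uint8_t explanation)
{
	/* bits 23:16 reason code, bits 15:8 explanation, bits 7:0 vendor */
	return ((uint32_t)reason << 16) | ((uint32_t)explanation << 8);
}
#endif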
/**
 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
 * @vport: pointer to a virtual N_Port data structure.
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine prepares and issues an Accept (ACC) response to an Address
 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the ADISC Accept response ELS IOCB command.
 *
 * Return code
 *   0 - Successfully issued acc adisc response
 *   1 - Failed to issue adisc acc response
 **/
static int
lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
		       struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	ADISC *ap;
	IOCB_t *icmd, *oldcmd;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize;
	int rc;

	cmdsize = sizeof(uint32_t) + sizeof(ADISC);
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);
	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	oldcmd = &oldiocb->iocb;
	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;

	/* Xmit ADISC ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0130 Xmit ADISC ACC response iotag x%x xri: "
			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
			 elsiocb->iotag, elsiocb->iocb.ulpContext,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
	pcmd += sizeof(uint32_t);

	ap = (ADISC *) (pcmd);
	ap->hardAL_PA = phba->fc_pref_ALPA;
	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
	ap->DID = be32_to_cpu(vport->fc_myDID);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"Issue ACC ADISC: did:x%x flg:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, 0);

	phba->fc_stat.elsXmitACC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
/**
 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
 * @vport: pointer to a virtual N_Port data structure.
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine prepares and issues an Accept (ACC) response to a Process
 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the PRLI Accept response ELS IOCB command.
 *
 * Return code
 *   0 - Successfully issued acc prli response
 *   1 - Failed to issue acc prli response
 **/
static int
lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
		      struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	PRLI *npr;
	lpfc_vpd_t *vpd;
	IOCB_t *icmd;
	IOCB_t *oldcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli *psli;
	uint8_t *pcmd;
	uint16_t cmdsize;
	int rc;

	psli = &phba->sli;

	cmdsize = sizeof(uint32_t) + sizeof(PRLI);
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
		ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	oldcmd = &oldiocb->iocb;
	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;

	/* Xmit PRLI ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
			 elsiocb->iotag, elsiocb->iocb.ulpContext,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
	pcmd += sizeof(uint32_t);

	/* For PRLI, remainder of payload is PRLI parameter page */
	memset(pcmd, 0, sizeof(PRLI));

	npr = (PRLI *) pcmd;
	vpd = &phba->vpd;
	/*
	 * If the remote port is a target and our firmware version is 3.20 or
	 * later, set the following bits for FC-TAPE support.
	 */
	if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (vpd->rev.feaLevelHigh >= 0x02)) {
		npr->ConfmComplAllowed = 1;
		npr->Retry = 1;
		npr->TaskRetryIdReq = 1;
	}
	npr->acceptRspCode = PRLI_REQ_EXECUTED;
	npr->estabImagePair = 1;
	npr->readXferRdyDis = 1;
	npr->ConfmComplAllowed = 1;

	npr->prliType = PRLI_FCP_TYPE;
	npr->initiatorFunc = 1;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"Issue ACC PRLI:  did:x%x flg:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, 0);

	phba->fc_stat.elsXmitACC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
/**
 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
 * @vport: pointer to a virtual N_Port data structure.
 * @format: rnid command format.
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine issues a Request Node Identification Data (RNID) Accept
 * (ACC) response. It constructs the RNID ACC response command according to
 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
 * issue the response. Note that this command does not need to hold the ndlp
 * reference count for the callback. So, the ndlp reference count taken by
 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
 * the IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine
 * that there is no ndlp reference available.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function. However, for the RNID Accept Response ELS command,
 * this is undone later by this routine after the IOCB is allocated.
 *
 * Return code
 *   0 - Successfully issued acc rnid response
 *   1 - Failed to issue acc rnid response
 **/
static int
lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	RNID *rn;
	IOCB_t *icmd, *oldcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli *psli;
	uint8_t *pcmd;
	uint16_t cmdsize;
	int rc;

	psli = &phba->sli;
	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
		+ (2 * sizeof(struct lpfc_name));
	if (format)
		cmdsize += sizeof(RNID_TOP_DISC);

	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);
	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	oldcmd = &oldiocb->iocb;
	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;

	/* Xmit RNID ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
			 elsiocb->iotag, elsiocb->iocb.ulpContext);
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
	pcmd += sizeof(uint32_t);

	memset(pcmd, 0, sizeof(RNID));
	rn = (RNID *) (pcmd);
	rn->Format = format;
	rn->CommonLen = (2 * sizeof(struct lpfc_name));
	memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
	switch (format) {
	case 0:
		rn->SpecificLen = 0;
		break;
	case RNID_TOPOLOGY_DISC:
		rn->SpecificLen = sizeof(RNID_TOP_DISC);
		memcpy(&rn->un.topologyDisc.portName,
		       &vport->fc_portname, sizeof(struct lpfc_name));
		rn->un.topologyDisc.unitType = RNID_HBA;
		rn->un.topologyDisc.physPort = 0;
		rn->un.topologyDisc.attachedNodes = 0;
		break;
	default:
		rn->CommonLen = 0;
		rn->SpecificLen = 0;
		break;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"Issue ACC RNID:  did:x%x flg:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, 0);

	phba->fc_stat.elsXmitACC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	lpfc_nlp_put(ndlp);
	elsiocb->context1 = NULL;	/* Don't need ndlp for cmpl,
					 * it could be freed */

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
/**
 * lpfc_els_clear_rrq - Clear the exchange that this rrq describes.
 * @vport: pointer to a virtual N_Port data structure.
 * @iocb: pointer to the lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 **/
static void
lpfc_els_clear_rrq(struct lpfc_vport *vport,
		   struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	uint8_t *pcmd;
	struct RRQ *rrq;
	uint16_t rxid;
	uint16_t xri;
	struct lpfc_node_rrq *prrq;

	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
	pcmd += sizeof(uint32_t);
	rrq = (struct RRQ *)pcmd;

	rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
	rxid = bf_get(rrq_rxid, rrq);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
			" x%x x%x\n",
			be32_to_cpu(bf_get(rrq_did, rrq)),
			bf_get(rrq_oxid, rrq),
			rxid,
			iocb->iotag, iocb->iocb.ulpContext);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
		xri = bf_get(rrq_oxid, rrq);
	else
		xri = rxid;
	prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
	if (prrq)
		lpfc_clr_rrq_active(phba, xri, prrq);
	return;
}
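
/*
 * Illustrative sketch (not part of the driver): lpfc_els_clear_rrq() above
 * picks the OX_ID when the RRQ's originator S_ID is our own DID and the
 * RX_ID otherwise, because the recovery qualifier is tracked against the
 * XRI we own for that exchange.  The hypothetical helper below restates
 * that choice on plain integers and is kept under #if 0.
 */
#if 0
static uint16_t example_rrq_xri(uint32_t my_did, uint32_t rrq_did,
				uint16_t oxid, uint16_t rxid)
{
	return (my_did == rrq_did) ? oxid : rxid;
}
#endif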
/**
 * lpfc_els_rsp_echo_acc - Issue echo acc response
 * @vport: pointer to a virtual N_Port data structure.
 * @data: pointer to echo data to return in the accept.
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * Return code
 *   0 - Successfully issued acc echo response
 *   1 - Failed to issue acc echo response
 **/
static int
lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli *psli;
	uint8_t *pcmd;
	uint16_t cmdsize;
	int rc;

	psli = &phba->sli;
	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;

	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);
	if (!elsiocb)
		return 1;

	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;	/* Xri / rx_id */
	elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;

	/* Xmit ECHO ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
			 elsiocb->iotag, elsiocb->iocb.ulpContext);
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"Issue ACC ECHO:  did:x%x flg:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, 0);

	phba->fc_stat.elsXmitACC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	elsiocb->context1 = NULL;	/* Don't need ndlp for cmpl,
					 * it could be freed */

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
/**
 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues Address Discover (ADISC) ELS commands to those
 * N_Ports which are in node port recovery state and ADISC has not been issued
 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
 * lpfc_issue_els_adisc() routine, the per @vport discover count
 * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches a
 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag shall
 * be marked with the FC_NLP_MORE bit and the process of issuing the remaining
 * ADISC IOCBs quits, to be picked up later. On the other hand, if no ADISC
 * IOCB is issued after walking through all the ndlps on the @vport, the
 * FC_NLP_MORE bit of the @vport fc_flag shall be cleared, indicating there
 * are no more ADISCs to be sent.
 *
 * Return code
 *   The number of N_Ports with adisc issued.
 **/
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int sentadisc = 0;

	/* go thru NPR nodes and issue any remaining ELS ADISCs */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
			sentadisc++;
			vport->num_disc_nodes++;
			if (vport->num_disc_nodes >=
			    vport->cfg_discovery_threads) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag |= FC_NLP_MORE;
				spin_unlock_irq(shost->host_lock);
				break;
			}
		}
	}
	if (sentadisc == 0) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(shost->host_lock);
	}
	return sentadisc;
}
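
/*
 * Illustrative sketch (not part of the driver): the throttling pattern used
 * above.  ADISCs are issued until the per-vport discovery count reaches the
 * configured discovery-thread limit; FC_NLP_MORE is then left set so a later
 * pass knows there are still NPR nodes waiting.  The helper below is
 * hypothetical and kept under #if 0.
 */
#if 0
static int example_should_pause_discovery(uint32_t issued, uint32_t limit)
{
	/* nonzero when the caller should stop and mark FC_NLP_MORE */
	return issued >= limit;
}
#endif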
/**
 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
 * which are in node port recovery state on a @vport. Each time an ELS
 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
 * the per @vport discover count (num_disc_nodes) shall be incremented.
 * If num_disc_nodes reaches a pre-configured threshold
 * (cfg_discovery_threads), the @vport fc_flag will be marked with the
 * FC_NLP_MORE bit and the process of issuing the remaining PLOGI IOCBs
 * quits, to be picked up later. On the other hand, if no PLOGI IOCB is
 * issued after walking through all the ndlps on the @vport, the FC_NLP_MORE
 * bit of the @vport fc_flag shall be cleared, indicating there are no more
 * PLOGIs to be sent.
 *
 * Return code
 *   The number of N_Ports with plogi issued.
 **/
int
lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int sentplogi = 0;

	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
		    (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			sentplogi++;
			vport->num_disc_nodes++;
			if (vport->num_disc_nodes >=
			    vport->cfg_discovery_threads) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag |= FC_NLP_MORE;
				spin_unlock_irq(shost->host_lock);
				break;
			}
		}
	}
	if (sentplogi) {
		lpfc_set_disctmo(vport);
	} else {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(shost->host_lock);
	}
	return sentplogi;
}
/**
 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine cleans up any Registration State Change Notification
 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
 * @vport together with the host_lock is used to prevent multiple threads
 * from trying to access the RSCN array on the same @vport at the same time.
 **/
void
lpfc_els_flush_rscn(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	int i;

	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is walking fc_rscn_id_list on this vport */
		spin_unlock_irq(shost->host_lock);
		return;
	}
	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);

	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
		vport->fc_rscn_id_list[i] = NULL;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_rscn_id_cnt = 0;
	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
	spin_unlock_irq(shost->host_lock);
	lpfc_can_disctmo(vport);
	/* Indicate we are done walking this fc_rscn_id_list */
	vport->fc_rscn_flush = 0;
}
/**
 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
 * @vport: pointer to a host virtual N_Port data structure.
 * @did: remote destination port identifier.
 *
 * This routine checks whether there is any pending Registration State
 * Change Notification (RSCN) to a @did on @vport.
 *
 * Return code
 *   Nonzero - The @did matched with a pending rscn
 *   0 - not able to match @did with a pending rscn
 **/
int
lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
{
	D_ID ns_did;
	D_ID rscn_did;
	uint32_t *lp;
	uint32_t payload_len, i;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ns_did.un.word = did;

	/* Never match fabric nodes for RSCNs */
	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
		return 0;

	/* If we are doing a FULL RSCN rediscovery, match everything */
	if (vport->fc_flag & FC_RSCN_DISCOVERY)
		return did;

	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is walking fc_rscn_id_list on this vport */
		spin_unlock_irq(shost->host_lock);
		return 0;
	}
	/* Indicate we are walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);
	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
		lp = vport->fc_rscn_id_list[i]->virt;
		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
		payload_len -= sizeof(uint32_t);	/* take off word 0 */
		while (payload_len) {
			rscn_did.un.word = be32_to_cpu(*lp++);
			payload_len -= sizeof(uint32_t);
			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
			case RSCN_ADDRESS_FORMAT_PORT:
				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
				    && (ns_did.un.b.area == rscn_did.un.b.area)
				    && (ns_did.un.b.id == rscn_did.un.b.id))
					goto return_did_out;
				break;
			case RSCN_ADDRESS_FORMAT_AREA:
				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
				    && (ns_did.un.b.area == rscn_did.un.b.area))
					goto return_did_out;
				break;
			case RSCN_ADDRESS_FORMAT_DOMAIN:
				if (ns_did.un.b.domain == rscn_did.un.b.domain)
					goto return_did_out;
				break;
			case RSCN_ADDRESS_FORMAT_FABRIC:
				goto return_did_out;
			}
		}
	}
	/* Indicate we are done with walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 0;
	return 0;
return_did_out:
	/* Indicate we are done with walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 0;
	return did;
}
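
/*
 * Illustrative sketch (not part of the driver): the RSCN address-format
 * matching done above, restated on raw 24-bit port IDs.  Each RSCN entry
 * carries a format field that says how much of the affected address is
 * significant: full port, area (upper 16 bits), domain (upper 8 bits), or
 * the whole fabric.  The case values below are hypothetical stand-ins for
 * the RSCN_ADDRESS_FORMAT_* constants used by the driver; kept under #if 0.
 */
#if 0
static int example_rscn_match(uint32_t fmt, uint32_t rscn_id, uint32_t did)
{
	switch (fmt) {
	case 0x0:	/* port address: all 24 bits must match */
		return (rscn_id & 0xFFFFFF) == (did & 0xFFFFFF);
	case 0x1:	/* area address: domain + area must match */
		return (rscn_id & 0xFFFF00) == (did & 0xFFFF00);
	case 0x2:	/* domain address: domain only */
		return (rscn_id & 0xFF0000) == (did & 0xFF0000);
	case 0x3:	/* fabric address: everything is affected */
		return 1;
	default:
		return 0;
	}
}
#endif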
/**
 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
 * state machine for each of the @vport's nodes that matches a pending RSCN
 * (Registration State Change Notification).
 *
 * Return code
 *   0 - Successful (currently always return 0)
 **/
static int
lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Move all affected nodes by pending RSCNs to NPR state. */
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp) ||
		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	}
	return 0;
}
/**
 * lpfc_send_rscn_event - Send an RSCN event to management application
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 *
 * lpfc_send_rscn_event sends an RSCN netlink event to management
 * applications.
 **/
static void
lpfc_send_rscn_event(struct lpfc_vport *vport,
		     struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	uint32_t *payload_ptr;
	uint32_t payload_len;
	struct lpfc_rscn_event_header *rscn_event_data;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	payload_ptr = (uint32_t *) pcmd->virt;
	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);

	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
		payload_len, GFP_KERNEL);
	if (!rscn_event_data) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			"0147 Failed to allocate memory for RSCN event\n");
		return;
	}
	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
	rscn_event_data->payload_length = payload_len;
	memcpy(rscn_event_data->rscn_payload, payload_ptr,
	       payload_len);

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(struct lpfc_els_event_header) + payload_len,
		(char *)rscn_event_data,
		LPFC_NL_VENDOR_ID);

	kfree(rscn_event_data);
}
/**
 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes an unsolicited RSCN (Registration State Change
 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
 * to invoke the fc_host_post_event() routine to the FC transport layer. If
 * the discover state machine is about to begin discovery, it just accepts
 * the RSCN and the discovery process will satisfy the RSCN. If this RSCN
 * only contains N_Port IDs for other vports on this HBA, it just accepts
 * the RSCN and ignores it. If the state machine is in the recovery state,
 * the fc_rscn_id_list of this @vport is walked and the
 * lpfc_rscn_recovery_check() routine is invoked to send a recovery event
 * for all nodes that match the RSCN payload. Otherwise, the
 * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
 *
 * Return code
 *   0 - Just sent the acc response
 *   1 - Sent the acc response and waited for name server completion
 **/
static int
lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp, *datap;
	IOCB_t *icmd;
	uint32_t payload_len, length, nportid, *cmd;
	int rscn_cnt;
	int rscn_id = 0, hba_id = 0;
	int i;

	icmd = &cmdiocb->iocb;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
	payload_len -= sizeof(uint32_t);	/* take off word 0 */
	/* RSCN received */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0214 RSCN received Data: x%x x%x x%x x%x\n",
			 vport->fc_flag, payload_len, *lp,
			 vport->fc_rscn_id_cnt);

	/* Send an RSCN event to the management application */
	lpfc_send_rscn_event(vport, cmdiocb);

	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
		fc_host_post_event(shost, fc_get_event_number(),
			FCH_EVT_RSCN, lp[i]);

	/* If we are about to begin discovery, just ACC the RSCN.
	 * Discovery processing will satisfy it.
	 */
	if (vport->port_state <= LPFC_NS_QRY) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);

		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
		return 0;
	}

	/* If this RSCN just contains NPortIDs for other vports on this HBA,
	 * just ACC and ignore it.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->cfg_peer_port_login)) {
		i = payload_len;
		datap = lp;
		while (i > 0) {
			nportid = *datap++;
			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
			i -= sizeof(uint32_t);
			rscn_id++;
			if (lpfc_find_vport_by_did(phba, nportid))
				hba_id++;
		}
		if (rscn_id == hba_id) {
			/* ALL NPortIDs in RSCN are on HBA */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "Data: x%x x%x x%x x%x\n",
					 vport->fc_flag, payload_len,
					 *lp, vport->fc_rscn_id_cnt);
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%x",
				ndlp->nlp_DID, vport->port_state,
				ndlp->nlp_flag);

			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
				ndlp, NULL);
			return 0;
		}
	}

	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is walking fc_rscn_id_list on this vport */
		vport->fc_flag |= FC_RSCN_DISCOVERY;
		spin_unlock_irq(shost->host_lock);
		/* Send back ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
		return 0;
	}
	/* Indicate we are walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);
	/* Get the array count after successfully have the token */
	rscn_cnt = vport->fc_rscn_id_cnt;
	/* If we are already processing an RSCN, save the received
	 * RSCN payload buffer, cmdiocb->context2 to process later.
	 */
	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%x",
			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_RSCN_DEFERRED;
		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
			vport->fc_flag |= FC_RSCN_MODE;
			spin_unlock_irq(shost->host_lock);
			if (rscn_cnt) {
				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
			}
			if ((rscn_cnt) &&
			    (payload_len + length <= LPFC_BPL_SIZE)) {
				*cmd &= ELS_CMD_MASK;
				*cmd |= cpu_to_be32(payload_len + length);
				memcpy(((uint8_t *)cmd) + length, lp,
				       payload_len);
			} else {
				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
				vport->fc_rscn_id_cnt++;
				/* If we zero, cmdiocb->context2, the calling
				 * routine will not try to free it.
				 */
				cmdiocb->context2 = NULL;
			}
			/* Deferred RSCN */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0235 Deferred RSCN "
					 "Data: x%x x%x x%x\n",
					 vport->fc_rscn_id_cnt, vport->fc_flag,
					 vport->port_state);
		} else {
			vport->fc_flag |= FC_RSCN_DISCOVERY;
			spin_unlock_irq(shost->host_lock);
			/* ReDiscovery RSCN */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0234 ReDiscovery RSCN "
					 "Data: x%x x%x x%x\n",
					 vport->fc_rscn_id_cnt, vport->fc_flag,
					 vport->port_state);
		}
		/* Indicate we are done walking fc_rscn_id_list on this vport */
		vport->fc_rscn_flush = 0;
		/* Send back ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
		/* send RECOVERY event for ALL nodes that match RSCN payload */
		lpfc_rscn_recovery_check(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_RSCN_DEFERRED;
		spin_unlock_irq(shost->host_lock);
		return 0;
	}
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
		"RCV RSCN:        did:x%x/ste:x%x flg:x%x",
		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_RSCN_MODE;
	spin_unlock_irq(shost->host_lock);
	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
	/* Indicate we are done walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 0;
	/*
	 * If we zero, cmdiocb->context2, the calling routine will
	 * not try to free it.
	 */
	cmdiocb->context2 = NULL;
	lpfc_set_disctmo(vport);
	/* Send back ACC */
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	/* send RECOVERY event for ALL nodes that match RSCN payload */
	lpfc_rscn_recovery_check(vport);
	return lpfc_els_handle_rscn(vport);
}
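
/*
 * Illustrative sketch (not part of the driver): the deferral path above
 * coalesces a newly received RSCN into the last held payload buffer when it
 * fits, by appending the new N_Port ID words and rewriting the byte count
 * carried in word 0.  The hypothetical helper below restates that idea on
 * plain buffers (lengths in bytes, word 0 already present); byte-order
 * handling is omitted for clarity.  Kept under #if 0.
 */
#if 0
static int example_coalesce_rscn(uint8_t *held, uint32_t held_len,
				 const uint8_t *new_ids, uint32_t new_len,
				 uint32_t max_len)
{
	uint32_t *word0 = (uint32_t *)held;

	if (held_len + new_len > max_len)
		return 0;		/* caller must hold a new buffer */
	memcpy(held + held_len, new_ids, new_len);
	/* keep the command byte, update the payload length */
	*word0 = (*word0 & 0xFF000000) | ((held_len + new_len) & 0x00FFFFFF);
	return 1;
}
#endif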
/**
 * lpfc_els_handle_rscn - Handle rscn for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine handles a Registration State Change Notification
 * (RSCN) for a @vport. If login to the NameServer does not exist, a new
 * ndlp shall be created and a Port Login (PLOGI) to the NameServer is
 * issued. Otherwise, if the ndlp to the NameServer exists, a Common
 * Transport (CT) command to the NameServer shall be issued. If the CT
 * command to the NameServer fails to be issued, the lpfc_els_flush_rscn()
 * routine shall be invoked to clean up any RSCN activities with the @vport.
 *
 * Return code
 *   0 - Cleaned up rscn on the @vport
 *   1 - Wait for plogi to name server before proceed
 **/
int
lpfc_els_handle_rscn(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba = vport->phba;

	/* Ignore RSCN if the port is being torn down. */
	if (vport->load_flag & FC_UNLOADING) {
		lpfc_els_flush_rscn(vport);
		return 0;
	}

	/* Start timer for RSCN processing */
	lpfc_set_disctmo(vport);

	/* RSCN processed */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
			 vport->port_state);

	/* To process RSCN, first compare RSCN data with NameServer */
	vport->fc_ns_retry = 0;
	vport->num_disc_nodes = 0;

	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		/* Good ndlp, issue CT Request to NameServer */
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
			/* Wait for NameServer query cmpl before we can
			 * continue
			 */
			return 1;
	} else {
		/* If login to NameServer does not exist, issue one */
		/* Good status, issue PLOGI to NameServer */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			/* Wait for NameServer login cmpl before we can
			 * continue
			 */
			return 1;

		if (ndlp) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_PLOGI_ISSUE);
			if (!ndlp) {
				lpfc_els_flush_rscn(vport);
				return 0;
			}
			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
		} else {
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp) {
				lpfc_els_flush_rscn(vport);
				return 0;
			}
			lpfc_nlp_init(vport, ndlp, NameServer_DID);
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		}
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
		/* Wait for NameServer login cmpl before we can
		 * continue
		 */
		return 1;
	}

	lpfc_els_flush_rscn(vport);
	return 0;
}
/**
 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
 * point topology. As an unsolicited FLOGI should not be received in loop
 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
 * lpfc_check_sparm() routine is invoked to check the parameters in the
 * unsolicited FLOGI. If parameter validation fails, the routine
 * lpfc_els_rsp_reject() shall be called with reject reason code set to
 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
 * FLOGI shall be compared with the Port WWN of the @vport to determine who
 * will initiate PLOGI. The party with the higher lexicographical value shall
 * have higher priority (as the winning port) and will initiate PLOGI and
 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
 *
 * Return code
 *   0 - Successfully processed the unsolicited flogi
 *   1 - Failed to process the unsolicited flogi
 **/
static int
lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	IOCB_t *icmd = &cmdiocb->iocb;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;
	struct ls_rjt stat;
	uint32_t cmd, did;
	int rc;

	cmd = *lp++;
	sp = (struct serv_parm *) lp;

	/* FLOGI received */

	lpfc_set_disctmo(vport);

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		/* We should never receive a FLOGI in loop mode, ignore it */
		did = icmd->un.elsreq64.remoteID;

		/* An FLOGI ELS command <elsCmd> was received from DID <did> in
		   Loop Mode */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0113 An FLOGI ELS command x%x was "
				 "received from DID x%x in Loop Mode\n",
				 cmd, did);
		return 1;
	}

	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
		/* For a FLOGI we accept, then if our portname is greater
		 * than the remote portname we initiate Nport login.
		 */

		rc = memcmp(&vport->fc_portname, &sp->portName,
			    sizeof(struct lpfc_name));

		if (!rc) {
			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!mbox)
				return 1;

			lpfc_linkdown(phba);
			lpfc_init_link(phba, mbox,
				       phba->cfg_topology,
				       phba->cfg_link_speed);
			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mbox->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			lpfc_set_loopback_flag(phba);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
			}
			return 1;
		} else if (rc > 0) {	/* greater than */
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_PT2PT_PLOGI;
			spin_unlock_irq(shost->host_lock);
		}
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT;
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
	} else {
		/* Reject this request because invalid parameters */
		stat.un.b.lsRjtRsvd0 = 0;
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
		stat.un.b.vendorUnique = 0;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
		return 1;
	}

	/* Send back ACC */
	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);

	return 0;
}
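
/*
 * Illustrative sketch (not part of the driver): in point-to-point mode the
 * port with the lexicographically higher WWPN wins the comparison above and
 * initiates PLOGI.  The hypothetical helper below mirrors that memcmp()
 * decision for two 8-byte port names; kept under #if 0.
 */
#if 0
static int example_pt2pt_we_initiate_plogi(const uint8_t local_wwpn[8],
					    const uint8_t remote_wwpn[8])
{
	/* nonzero when the local port should send PLOGI */
	return memcmp(local_wwpn, remote_wwpn, 8) > 0;
}
#endif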
/**
 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Request Node Identification Data (RNID) IOCB
 * received as an ELS unsolicited event. Only when the RNID specifies format
 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) does
 * this routine invoke the lpfc_els_rsp_rnid_acc() routine to Accept (ACC)
 * the RNID ELS command. All other RNID formats are rejected by invoking the
 * lpfc_els_rsp_reject() routine.
 *
 * Return code
 *   0 - Successfully processed rnid iocb (currently always return 0)
 **/
static int
lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_nodelist *ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	RNID *rn;
	struct ls_rjt stat;
	uint32_t cmd, did;

	icmd = &cmdiocb->iocb;
	did = icmd->un.elsreq64.remoteID;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	cmd = *lp++;
	rn = (RNID *) lp;

	/* RNID received */

	switch (rn->Format) {
	case 0:
	case RNID_TOPOLOGY_DISC:
		/* Send back ACC */
		lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
		break;
	default:
		/* Reject this request because format not supported */
		stat.un.b.lsRjtRsvd0 = 0;
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
		stat.un.b.vendorUnique = 0;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
	}
	return 0;
}
/**
 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * Return code
 *   0 - Successfully processed echo iocb (currently always return 0)
 **/
static int
lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_nodelist *ndlp)
{
	uint8_t *pcmd;

	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);

	/* skip over first word of echo command to find echo data */
	pcmd += sizeof(uint32_t);

	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
	return 0;
}
/**
 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Link Incident Report Registration (LIRR) IOCB
 * received as an ELS unsolicited event. Currently, this function just invokes
 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
 *
 * Return code
 *   0 - Successfully processed lirr iocb (currently always return 0)
 **/
static int
lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_nodelist *ndlp)
{
	struct ls_rjt stat;

	/* For now, unconditionally reject this command */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}
/**
 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
 * received as an ELS unsolicited event. A request to RRQ shall only
 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
 * Nx_Port N_Port_ID of the target Exchange is the same as the
 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
 * not accepted, an LS_RJT with reason code "Unable to perform
 * command request" and reason code explanation "Invalid Originator
 * S_ID" shall be returned. For now, we just unconditionally accept
 * RRQ from the target.
 **/
static void
lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *ndlp)
{
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
}
/**
 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion callback function for the MBX_READ_LNK_STAT
 * mailbox command. This callback function is to actually send the Accept
 * (ACC) response to a Read Link Error Status (RLS) unsolicited IOCB event.
 * It collects the link statistics from the completion of the
 * MBX_READ_LNK_STAT mailbox command, constructs the RLS response with the
 * link statistics collected, and then invokes the lpfc_sli_issue_iocb()
 * routine to send the ACC response to the RLS.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the RLS Accept Response ELS IOCB command.
 **/
static void
lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	IOCB_t *icmd;
	struct RLS_RSP *rls_rsp;
	uint8_t *pcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid;
	uint16_t rxid;
	uint32_t cmdsize;

	mb = &pmb->u.mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
	mempool_free(pmb, phba->mbox_mem_pool);
	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
				     lpfc_max_els_tries, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);

	/* Decrement the ndlp reference count from previous mbox command */
	lpfc_nlp_put(ndlp);

	if (!elsiocb)
		return;

	icmd = &elsiocb->iocb;
	icmd->ulpContext = rxid;
	icmd->unsli3.rcvsli3.ox_id = oxid;

	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
	pcmd += sizeof(uint32_t); /* Skip past command */
	rls_rsp = (struct RLS_RSP *)pcmd;

	rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
	rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
	rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);

	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
			 elsiocb->iotag, elsiocb->iocb.ulpContext,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	phba->fc_stat.elsXmitACC++;
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
		lpfc_els_free_iocb(phba, elsiocb);
}
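
/*
 * Illustrative sketch (not part of the driver): the RLS/RPS mailbox paths in
 * this file smuggle both exchange IDs through the single context1 pointer by
 * packing OX_ID into the upper 16 bits and RX_ID into the lower 16 bits; the
 * completion handlers, like the one above, unpack them the same way.  A
 * hypothetical restatement, kept under #if 0:
 */
#if 0
static void *example_pack_xids(uint16_t oxid, uint16_t rxid)
{
	return (void *)(((unsigned long)oxid << 16) | rxid);
}

static void example_unpack_xids(void *ctx, uint16_t *oxid, uint16_t *rxid)
{
	*rxid = (uint16_t)((unsigned long)ctx & 0xffff);
	*oxid = (uint16_t)(((unsigned long)ctx >> 16) & 0xffff);
}
#endif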
/**
 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion callback function for the MBX_READ_LNK_STAT
 * mailbox command. This callback function is to actually send the Accept
 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
 * mailbox command, constructs the RPS response with the link statistics
 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the
 * ACC response to the RPS.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the RPS Accept Response ELS IOCB command.
 **/
static void
lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	IOCB_t *icmd;
	RPS_RSP *rps_rsp;
	uint8_t *pcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_nodelist *ndlp;
	uint16_t status;
	uint16_t oxid;
	uint16_t rxid;
	uint32_t cmdsize;

	mb = &pmb->u.mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
	mempool_free(pmb, phba->mbox_mem_pool);
	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
				     lpfc_max_els_tries, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);

	/* Decrement the ndlp reference count from previous mbox command */
	lpfc_nlp_put(ndlp);

	if (!elsiocb)
		return;

	icmd = &elsiocb->iocb;
	icmd->ulpContext = rxid;
	icmd->unsli3.rcvsli3.ox_id = oxid;

	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
	pcmd += sizeof(uint32_t); /* Skip past command */
	rps_rsp = (RPS_RSP *)pcmd;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		status = 0x10;
	else
		status = 0x8;
	if (phba->pport->fc_flag & FC_FABRIC)
		status |= 0x2;

	rps_rsp->rsvd1 = 0;
	rps_rsp->portStatus = cpu_to_be16(status);
	rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
	rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
	rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
	rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
	rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
	rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
	/* Xmit ELS RPS ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
			 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
			 elsiocb->iotag, elsiocb->iocb.ulpContext,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	phba->fc_stat.elsXmitACC++;
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
		lpfc_els_free_iocb(phba, elsiocb);
	return;
}
/**
 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Read Link Error Status (RLS) IOCB received as an
 * ELS unsolicited event. It first checks the remote port state. If the
 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
 * for reading the HBA link statistics. It is the callback function,
 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
 * that actually sends out the RLS Accept (ACC) response.
 *
 * Return code
 *   0 - Successfully processed rls iocb (currently always return 0)
 **/
static int
lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *pcmd;
	struct ls_rjt stat;

	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
		/* reject the unsolicited RLS request and done with it */
		goto reject_out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
	if (mbox) {
		lpfc_read_lnk_stat(phba, mbox);
		mbox->context1 = (void *)((unsigned long)
			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
			cmdiocb->iocb.ulpContext)); /* rx_id */
		mbox->context2 = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
			!= MBX_NOT_FINISHED)
			/* Mbox completion will send ELS Response */
			return 0;
		/* Decrement reference count used for the failed mbox
		 * command.
		 */
		lpfc_nlp_put(ndlp);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
reject_out:
	/* issue rejection response */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}
/**
 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Read Timeout Value (RTV) IOCB received as an
 * ELS unsolicited event. It first checks the remote port state. If the
 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
 * response. Otherwise, it sends the Accept (ACC) response to the Read
 * Timeout Value (RTV) unsolicited IOCB event.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the RTV Accept Response ELS IOCB command.
 *
 * Return code
 *   0 - Successfully processed rtv iocb (currently always return 0)
 **/
5306 lpfc_els_rcv_rtv(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
5307 struct lpfc_nodelist
*ndlp
)
5309 struct lpfc_hba
*phba
= vport
->phba
;
5311 struct RTV_RSP
*rtv_rsp
;
5313 struct lpfc_iocbq
*elsiocb
;
5317 if ((ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) &&
5318 (ndlp
->nlp_state
!= NLP_STE_MAPPED_NODE
))
5319 /* reject the unsolicited RPS request and done with it */
5322 cmdsize
= sizeof(struct RTV_RSP
) + sizeof(uint32_t);
5323 elsiocb
= lpfc_prep_els_iocb(phba
->pport
, 0, cmdsize
,
5324 lpfc_max_els_tries
, ndlp
,
5325 ndlp
->nlp_DID
, ELS_CMD_ACC
);
5330 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
5331 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
5332 pcmd
+= sizeof(uint32_t); /* Skip past command */
5334 /* use the command's xri in the response */
5335 elsiocb
->iocb
.ulpContext
= cmdiocb
->iocb
.ulpContext
; /* Xri / rx_id */
5336 elsiocb
->iocb
.unsli3
.rcvsli3
.ox_id
= cmdiocb
->iocb
.unsli3
.rcvsli3
.ox_id
;
5338 rtv_rsp
= (struct RTV_RSP
*)pcmd
;
5340 /* populate RTV payload */
5341 rtv_rsp
->ratov
= cpu_to_be32(phba
->fc_ratov
* 1000); /* report msecs */
5342 rtv_rsp
->edtov
= cpu_to_be32(phba
->fc_edtov
);
5343 bf_set(qtov_edtovres
, rtv_rsp
, phba
->fc_edtovResol
? 1 : 0);
5344 bf_set(qtov_rttov
, rtv_rsp
, 0); /* Field is for FC ONLY */
5345 rtv_rsp
->qtov
= cpu_to_be32(rtv_rsp
->qtov
);
5347 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
5348 lpfc_printf_vlog(ndlp
->vport
, KERN_INFO
, LOG_ELS
,
5349 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
5350 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
5351 "Data: x%x x%x x%x\n",
5352 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
5353 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
5355 rtv_rsp
->ratov
, rtv_rsp
->edtov
, rtv_rsp
->qtov
);
5356 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
5357 phba
->fc_stat
.elsXmitACC
++;
5358 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) == IOCB_ERROR
)
5359 lpfc_els_free_iocb(phba
, elsiocb
);
5363 /* issue rejection response */
5364 stat
.un
.b
.lsRjtRsvd0
= 0;
5365 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
5366 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CANT_GIVE_DATA
;
5367 stat
.un
.b
.vendorUnique
= 0;
5368 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
5372 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb
5373 * @vport: pointer to a host virtual N_Port data structure.
5374 * @cmdiocb: pointer to lpfc command iocb data structure.
5375 * @ndlp: pointer to a node-list data structure.
5377 * This routine processes Read Port Status (RPS) IOCB received as an
5378 * ELS unsolicited event. It first checks the remote port state. If the
5379 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5380 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5381 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
5382 * for reading the HBA link statistics. It is for the callback function,
5383 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
5384 * to actually sending out RPS Accept (ACC) response.
5387 * 0 - Successfully processed rps iocb (currently always return 0)
5390 lpfc_els_rcv_rps(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
5391 struct lpfc_nodelist
*ndlp
)
5393 struct lpfc_hba
*phba
= vport
->phba
;
5397 struct lpfc_dmabuf
*pcmd
;
5401 if ((ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) &&
5402 (ndlp
->nlp_state
!= NLP_STE_MAPPED_NODE
))
5403 /* reject the unsolicited RPS request and done with it */
5406 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
5407 lp
= (uint32_t *) pcmd
->virt
;
5408 flag
= (be32_to_cpu(*lp
++) & 0xf);
5412 ((flag
== 1) && (be32_to_cpu(rps
->un
.portNum
) == 0)) ||
5413 ((flag
== 2) && (memcmp(&rps
->un
.portName
, &vport
->fc_portname
,
5414 sizeof(struct lpfc_name
)) == 0))) {
5416 printk("Fix me....\n");
5418 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_ATOMIC
);
5420 lpfc_read_lnk_stat(phba
, mbox
);
5421 mbox
->context1
= (void *)((unsigned long)
5422 ((cmdiocb
->iocb
.unsli3
.rcvsli3
.ox_id
<< 16) |
5423 cmdiocb
->iocb
.ulpContext
)); /* rx_id */
5424 mbox
->context2
= lpfc_nlp_get(ndlp
);
5425 mbox
->vport
= vport
;
5426 mbox
->mbox_cmpl
= lpfc_els_rsp_rps_acc
;
5427 if (lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
)
5428 != MBX_NOT_FINISHED
)
5429 /* Mbox completion will send ELS Response */
5431 /* Decrement reference count used for the failed mbox
5435 mempool_free(mbox
, phba
->mbox_mem_pool
);
5440 /* issue rejection response */
5441 stat
.un
.b
.lsRjtRsvd0
= 0;
5442 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
5443 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CANT_GIVE_DATA
;
5444 stat
.un
.b
.vendorUnique
= 0;
5445 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
5449 /* lpfc_issue_els_rrq - Process an unsolicited rps iocb
5450 * @vport: pointer to a host virtual N_Port data structure.
5451 * @ndlp: pointer to a node-list data structure.
5452 * @did: DID of the target.
5453 * @rrq: Pointer to the rrq struct.
5455 * Build a ELS RRQ command and send it to the target. If the issue_iocb is
5456 * Successful the the completion handler will clear the RRQ.
5459 * 0 - Successfully sent rrq els iocb.
5460 * 1 - Failed to send rrq els iocb.
5463 lpfc_issue_els_rrq(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
5464 uint32_t did
, struct lpfc_node_rrq
*rrq
)
5466 struct lpfc_hba
*phba
= vport
->phba
;
5467 struct RRQ
*els_rrq
;
5469 struct lpfc_iocbq
*elsiocb
;
5475 if (ndlp
!= rrq
->ndlp
)
5477 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
))
5480 /* If ndlp is not NULL, we will bump the reference count on it */
5481 cmdsize
= (sizeof(uint32_t) + sizeof(struct RRQ
));
5482 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, 0, ndlp
, did
,
5487 icmd
= &elsiocb
->iocb
;
5488 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
5490 /* For RRQ request, remainder of payload is Exchange IDs */
5491 *((uint32_t *) (pcmd
)) = ELS_CMD_RRQ
;
5492 pcmd
+= sizeof(uint32_t);
5493 els_rrq
= (struct RRQ
*) pcmd
;
5495 bf_set(rrq_oxid
, els_rrq
, rrq
->xritag
);
5496 bf_set(rrq_rxid
, els_rrq
, rrq
->rxid
);
5497 bf_set(rrq_did
, els_rrq
, vport
->fc_myDID
);
5498 els_rrq
->rrq
= cpu_to_be32(els_rrq
->rrq
);
5499 els_rrq
->rrq_exchg
= cpu_to_be32(els_rrq
->rrq_exchg
);
5502 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
5503 "Issue RRQ: did:x%x",
5504 did
, rrq
->xritag
, rrq
->rxid
);
5505 elsiocb
->context_un
.rrq
= rrq
;
5506 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rrq
;
5507 ret
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
5509 if (ret
== IOCB_ERROR
) {
5510 lpfc_els_free_iocb(phba
, elsiocb
);
5517 * lpfc_send_rrq - Sends ELS RRQ if needed.
5518 * @phba: pointer to lpfc hba data structure.
5519 * @rrq: pointer to the active rrq.
5521 * This routine will call the lpfc_issue_els_rrq if the rrq is
5522 * still active for the xri. If this function returns a failure then
5523 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
5525 * Returns 0 Success.
5529 lpfc_send_rrq(struct lpfc_hba
*phba
, struct lpfc_node_rrq
*rrq
)
5531 struct lpfc_nodelist
*ndlp
= lpfc_findnode_did(rrq
->vport
,
5533 if (lpfc_test_rrq_active(phba
, ndlp
, rrq
->xritag
))
5534 return lpfc_issue_els_rrq(rrq
->vport
, ndlp
,
5541 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
5542 * @vport: pointer to a host virtual N_Port data structure.
5543 * @cmdsize: size of the ELS command.
5544 * @oldiocb: pointer to the original lpfc command iocb data structure.
5545 * @ndlp: pointer to a node-list data structure.
5547 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command.
5548 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
5550 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5551 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5552 * will be stored into the context1 field of the IOCB for the completion
5553 * callback function to the RPL Accept Response ELS command.
5556 * 0 - Successfully issued ACC RPL ELS command
5557 * 1 - Failed to issue ACC RPL ELS command
5560 lpfc_els_rsp_rpl_acc(struct lpfc_vport
*vport
, uint16_t cmdsize
,
5561 struct lpfc_iocbq
*oldiocb
, struct lpfc_nodelist
*ndlp
)
5563 struct lpfc_hba
*phba
= vport
->phba
;
5564 IOCB_t
*icmd
, *oldcmd
;
5566 struct lpfc_iocbq
*elsiocb
;
5569 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
, ndlp
,
5570 ndlp
->nlp_DID
, ELS_CMD_ACC
);
5575 icmd
= &elsiocb
->iocb
;
5576 oldcmd
= &oldiocb
->iocb
;
5577 icmd
->ulpContext
= oldcmd
->ulpContext
; /* Xri / rx_id */
5578 icmd
->unsli3
.rcvsli3
.ox_id
= oldcmd
->unsli3
.rcvsli3
.ox_id
;
5580 pcmd
= (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
5581 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
5582 pcmd
+= sizeof(uint16_t);
5583 *((uint16_t *)(pcmd
)) = be16_to_cpu(cmdsize
);
5584 pcmd
+= sizeof(uint16_t);
5586 /* Setup the RPL ACC payload */
5587 rpl_rsp
.listLen
= be32_to_cpu(1);
5589 rpl_rsp
.port_num_blk
.portNum
= 0;
5590 rpl_rsp
.port_num_blk
.portID
= be32_to_cpu(vport
->fc_myDID
);
5591 memcpy(&rpl_rsp
.port_num_blk
.portName
, &vport
->fc_portname
,
5592 sizeof(struct lpfc_name
));
5593 memcpy(pcmd
, &rpl_rsp
, cmdsize
- sizeof(uint32_t));
5594 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
5595 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
5596 "0120 Xmit ELS RPL ACC response tag x%x "
5597 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5599 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
5600 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
5602 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
5603 phba
->fc_stat
.elsXmitACC
++;
5604 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) ==
5606 lpfc_els_free_iocb(phba
, elsiocb
);
5613 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
5614 * @vport: pointer to a host virtual N_Port data structure.
5615 * @cmdiocb: pointer to lpfc command iocb data structure.
5616 * @ndlp: pointer to a node-list data structure.
5618 * This routine processes Read Port List (RPL) IOCB received as an ELS
5619 * unsolicited event. It first checks the remote port state. If the remote
5620 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
5621 * invokes the lpfc_els_rsp_reject() routine to send reject response.
5622 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
5623 * to accept the RPL.
5626 * 0 - Successfully processed rpl iocb (currently always return 0)
5629 lpfc_els_rcv_rpl(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
5630 struct lpfc_nodelist
*ndlp
)
5632 struct lpfc_dmabuf
*pcmd
;
5639 if ((ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) &&
5640 (ndlp
->nlp_state
!= NLP_STE_MAPPED_NODE
)) {
5641 /* issue rejection response */
5642 stat
.un
.b
.lsRjtRsvd0
= 0;
5643 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
5644 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CANT_GIVE_DATA
;
5645 stat
.un
.b
.vendorUnique
= 0;
5646 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
,
5648 /* rejected the unsolicited RPL request and done with it */
5652 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
5653 lp
= (uint32_t *) pcmd
->virt
;
5654 rpl
= (RPL
*) (lp
+ 1);
5655 maxsize
= be32_to_cpu(rpl
->maxsize
);
5657 /* We support only one port */
5658 if ((rpl
->index
== 0) &&
5660 ((maxsize
* sizeof(uint32_t)) >= sizeof(RPL_RSP
)))) {
5661 cmdsize
= sizeof(uint32_t) + sizeof(RPL_RSP
);
5663 cmdsize
= sizeof(uint32_t) + maxsize
* sizeof(uint32_t);
5665 lpfc_els_rsp_rpl_acc(vport
, cmdsize
, cmdiocb
, ndlp
);
5671 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
5672 * @vport: pointer to a virtual N_Port data structure.
5673 * @cmdiocb: pointer to lpfc command iocb data structure.
5674 * @ndlp: pointer to a node-list data structure.
5676 * This routine processes Fibre Channel Address Resolution Protocol
5677 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
5678 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
5679 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
5680 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
5681 * remote PortName is compared against the FC PortName stored in the @vport
5682 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
5683 * compared against the FC NodeName stored in the @vport data structure.
5684 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
5685 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
5686 * invoked to send out FARP Response to the remote node. Before sending the
5687 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
5688 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
5689 * routine is invoked to log into the remote port first.
5692 * 0 - Either the FARP Match Mode not supported or successfully processed
5695 lpfc_els_rcv_farp(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
5696 struct lpfc_nodelist
*ndlp
)
5698 struct lpfc_dmabuf
*pcmd
;
5702 uint32_t cmd
, cnt
, did
;
5704 icmd
= &cmdiocb
->iocb
;
5705 did
= icmd
->un
.elsreq64
.remoteID
;
5706 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
5707 lp
= (uint32_t *) pcmd
->virt
;
5711 /* FARP-REQ received from DID <did> */
5712 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
5713 "0601 FARP-REQ received from DID x%x\n", did
);
5714 /* We will only support match on WWPN or WWNN */
5715 if (fp
->Mflags
& ~(FARP_MATCH_NODE
| FARP_MATCH_PORT
)) {
5720 /* If this FARP command is searching for my portname */
5721 if (fp
->Mflags
& FARP_MATCH_PORT
) {
5722 if (memcmp(&fp
->RportName
, &vport
->fc_portname
,
5723 sizeof(struct lpfc_name
)) == 0)
5727 /* If this FARP command is searching for my nodename */
5728 if (fp
->Mflags
& FARP_MATCH_NODE
) {
5729 if (memcmp(&fp
->RnodeName
, &vport
->fc_nodename
,
5730 sizeof(struct lpfc_name
)) == 0)
5735 if ((ndlp
->nlp_state
== NLP_STE_UNMAPPED_NODE
) ||
5736 (ndlp
->nlp_state
== NLP_STE_MAPPED_NODE
)) {
5737 /* Log back into the node before sending the FARP. */
5738 if (fp
->Rflags
& FARP_REQUEST_PLOGI
) {
5739 ndlp
->nlp_prev_state
= ndlp
->nlp_state
;
5740 lpfc_nlp_set_state(vport
, ndlp
,
5741 NLP_STE_PLOGI_ISSUE
);
5742 lpfc_issue_els_plogi(vport
, ndlp
->nlp_DID
, 0);
5745 /* Send a FARP response to that node */
5746 if (fp
->Rflags
& FARP_REQUEST_FARPR
)
5747 lpfc_issue_els_farpr(vport
, did
, 0);
5754 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
5755 * @vport: pointer to a host virtual N_Port data structure.
5756 * @cmdiocb: pointer to lpfc command iocb data structure.
5757 * @ndlp: pointer to a node-list data structure.
5759 * This routine processes Fibre Channel Address Resolution Protocol
5760 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
5761 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
5762 * the FARP response request.
5765 * 0 - Successfully processed FARPR IOCB (currently always return 0)
5768 lpfc_els_rcv_farpr(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
5769 struct lpfc_nodelist
*ndlp
)
5771 struct lpfc_dmabuf
*pcmd
;
5776 icmd
= &cmdiocb
->iocb
;
5777 did
= icmd
->un
.elsreq64
.remoteID
;
5778 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
5779 lp
= (uint32_t *) pcmd
->virt
;
5782 /* FARP-RSP received from DID <did> */
5783 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
5784 "0600 FARP-RSP received from DID x%x\n", did
);
5785 /* ACCEPT the Farp resp request */
5786 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
5792 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
5793 * @vport: pointer to a host virtual N_Port data structure.
5794 * @cmdiocb: pointer to lpfc command iocb data structure.
5795 * @fan_ndlp: pointer to a node-list data structure.
5797 * This routine processes a Fabric Address Notification (FAN) IOCB
5798 * command received as an ELS unsolicited event. The FAN ELS command will
5799 * only be processed on a physical port (i.e., the @vport represents the
5800 * physical port). The fabric NodeName and PortName from the FAN IOCB are
5801 * compared against those in the phba data structure. If any of those is
5802 * different, the lpfc_initial_flogi() routine is invoked to initialize
5803 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
5804 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
5805 * is invoked to register login to the fabric.
5808 * 0 - Successfully processed fan iocb (currently always return 0).
5811 lpfc_els_rcv_fan(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
5812 struct lpfc_nodelist
*fan_ndlp
)
5814 struct lpfc_hba
*phba
= vport
->phba
;
5818 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
, "0265 FAN received\n");
5819 lp
= (uint32_t *)((struct lpfc_dmabuf
*)cmdiocb
->context2
)->virt
;
5821 /* FAN received; Fan does not have a reply sequence */
5822 if ((vport
== phba
->pport
) &&
5823 (vport
->port_state
== LPFC_LOCAL_CFG_LINK
)) {
5824 if ((memcmp(&phba
->fc_fabparam
.nodeName
, &fp
->FnodeName
,
5825 sizeof(struct lpfc_name
))) ||
5826 (memcmp(&phba
->fc_fabparam
.portName
, &fp
->FportName
,
5827 sizeof(struct lpfc_name
)))) {
5828 /* This port has switched fabrics. FLOGI is required */
5829 lpfc_issue_init_vfi(vport
);
5831 /* FAN verified - skip FLOGI */
5832 vport
->fc_myDID
= vport
->fc_prevDID
;
5833 if (phba
->sli_rev
< LPFC_SLI_REV4
)
5834 lpfc_issue_fabric_reglogin(vport
);
5836 lpfc_issue_reg_vfi(vport
);
5843 * lpfc_els_timeout - Handler funciton to the els timer
5844 * @ptr: holder for the timer function associated data.
5846 * This routine is invoked by the ELS timer after timeout. It posts the ELS
5847 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
5848 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
5849 * up the worker thread. It is for the worker thread to invoke the routine
5850 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
5853 lpfc_els_timeout(unsigned long ptr
)
5855 struct lpfc_vport
*vport
= (struct lpfc_vport
*) ptr
;
5856 struct lpfc_hba
*phba
= vport
->phba
;
5857 uint32_t tmo_posted
;
5858 unsigned long iflag
;
5860 spin_lock_irqsave(&vport
->work_port_lock
, iflag
);
5861 tmo_posted
= vport
->work_port_events
& WORKER_ELS_TMO
;
5863 vport
->work_port_events
|= WORKER_ELS_TMO
;
5864 spin_unlock_irqrestore(&vport
->work_port_lock
, iflag
);
5867 lpfc_worker_wake_up(phba
);
5873 * lpfc_els_timeout_handler - Process an els timeout event
5874 * @vport: pointer to a virtual N_Port data structure.
5876 * This routine is the actual handler function that processes an ELS timeout
5877 * event. It walks the ELS ring to get and abort all the IOCBs (except the
5878 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
5879 * invoking the lpfc_sli_issue_abort_iotag() routine.
5882 lpfc_els_timeout_handler(struct lpfc_vport
*vport
)
5884 struct lpfc_hba
*phba
= vport
->phba
;
5885 struct lpfc_sli_ring
*pring
;
5886 struct lpfc_iocbq
*tmp_iocb
, *piocb
;
5888 struct lpfc_dmabuf
*pcmd
;
5889 uint32_t els_command
= 0;
5891 uint32_t remote_ID
= 0xffffffff;
5892 LIST_HEAD(txcmplq_completions
);
5893 LIST_HEAD(abort_list
);
5896 timeout
= (uint32_t)(phba
->fc_ratov
<< 1);
5898 pring
= &phba
->sli
.ring
[LPFC_ELS_RING
];
5900 spin_lock_irq(&phba
->hbalock
);
5901 list_splice_init(&pring
->txcmplq
, &txcmplq_completions
);
5902 spin_unlock_irq(&phba
->hbalock
);
5904 list_for_each_entry_safe(piocb
, tmp_iocb
, &txcmplq_completions
, list
) {
5907 if ((piocb
->iocb_flag
& LPFC_IO_LIBDFC
) != 0 ||
5908 piocb
->iocb
.ulpCommand
== CMD_ABORT_XRI_CN
||
5909 piocb
->iocb
.ulpCommand
== CMD_CLOSE_XRI_CN
)
5912 if (piocb
->vport
!= vport
)
5915 pcmd
= (struct lpfc_dmabuf
*) piocb
->context2
;
5917 els_command
= *(uint32_t *) (pcmd
->virt
);
5919 if (els_command
== ELS_CMD_FARP
||
5920 els_command
== ELS_CMD_FARPR
||
5921 els_command
== ELS_CMD_FDISC
)
5924 if (piocb
->drvrTimeout
> 0) {
5925 if (piocb
->drvrTimeout
>= timeout
)
5926 piocb
->drvrTimeout
-= timeout
;
5928 piocb
->drvrTimeout
= 0;
5932 remote_ID
= 0xffffffff;
5933 if (cmd
->ulpCommand
!= CMD_GEN_REQUEST64_CR
)
5934 remote_ID
= cmd
->un
.elsreq64
.remoteID
;
5936 struct lpfc_nodelist
*ndlp
;
5937 ndlp
= __lpfc_findnode_rpi(vport
, cmd
->ulpContext
);
5938 if (ndlp
&& NLP_CHK_NODE_ACT(ndlp
))
5939 remote_ID
= ndlp
->nlp_DID
;
5941 list_add_tail(&piocb
->dlist
, &abort_list
);
5943 spin_lock_irq(&phba
->hbalock
);
5944 list_splice(&txcmplq_completions
, &pring
->txcmplq
);
5945 spin_unlock_irq(&phba
->hbalock
);
5947 list_for_each_entry_safe(piocb
, tmp_iocb
, &abort_list
, dlist
) {
5948 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
5949 "0127 ELS timeout Data: x%x x%x x%x "
5950 "x%x\n", els_command
,
5951 remote_ID
, cmd
->ulpCommand
, cmd
->ulpIoTag
);
5952 spin_lock_irq(&phba
->hbalock
);
5953 list_del_init(&piocb
->dlist
);
5954 lpfc_sli_issue_abort_iotag(phba
, pring
, piocb
);
5955 spin_unlock_irq(&phba
->hbalock
);
5958 if (phba
->sli
.ring
[LPFC_ELS_RING
].txcmplq_cnt
)
5959 mod_timer(&vport
->els_tmofunc
, jiffies
+ HZ
* timeout
);
5963 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
5964 * @vport: pointer to a host virtual N_Port data structure.
5966 * This routine is used to clean up all the outstanding ELS commands on a
5967 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
5968 * routine. After that, it walks the ELS transmit queue to remove all the
5969 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
5970 * the IOCBs with a non-NULL completion callback function, the callback
5971 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5972 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
5973 * callback function, the IOCB will simply be released. Finally, it walks
5974 * the ELS transmit completion queue to issue an abort IOCB to any transmit
5975 * completion queue IOCB that is associated with the @vport and is not
5976 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
5977 * part of the discovery state machine) out to HBA by invoking the
5978 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
5979 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee
5980 * the IOCBs are aborted when this function returns.
5983 lpfc_els_flush_cmd(struct lpfc_vport
*vport
)
5985 LIST_HEAD(completions
);
5986 struct lpfc_hba
*phba
= vport
->phba
;
5987 struct lpfc_sli_ring
*pring
= &phba
->sli
.ring
[LPFC_ELS_RING
];
5988 struct lpfc_iocbq
*tmp_iocb
, *piocb
;
5991 lpfc_fabric_abort_vport(vport
);
5993 spin_lock_irq(&phba
->hbalock
);
5994 list_for_each_entry_safe(piocb
, tmp_iocb
, &pring
->txq
, list
) {
5997 if (piocb
->iocb_flag
& LPFC_IO_LIBDFC
) {
6001 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6002 if (cmd
->ulpCommand
== CMD_QUE_RING_BUF_CN
||
6003 cmd
->ulpCommand
== CMD_QUE_RING_BUF64_CN
||
6004 cmd
->ulpCommand
== CMD_CLOSE_XRI_CN
||
6005 cmd
->ulpCommand
== CMD_ABORT_XRI_CN
)
6008 if (piocb
->vport
!= vport
)
6011 list_move_tail(&piocb
->list
, &completions
);
6015 list_for_each_entry_safe(piocb
, tmp_iocb
, &pring
->txcmplq
, list
) {
6016 if (piocb
->iocb_flag
& LPFC_IO_LIBDFC
) {
6020 if (piocb
->vport
!= vport
)
6023 lpfc_sli_issue_abort_iotag(phba
, pring
, piocb
);
6025 spin_unlock_irq(&phba
->hbalock
);
6027 /* Cancell all the IOCBs from the completions list */
6028 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
6035 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
6036 * @phba: pointer to lpfc hba data structure.
6038 * This routine is used to clean up all the outstanding ELS commands on a
6039 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
6040 * routine. After that, it walks the ELS transmit queue to remove all the
6041 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
6042 * the IOCBs with the completion callback function associated, the callback
6043 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
6044 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
6045 * callback function associated, the IOCB will simply be released. Finally,
6046 * it walks the ELS transmit completion queue to issue an abort IOCB to any
6047 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
6048 * management plane IOCBs that are not part of the discovery state machine)
6049 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
6052 lpfc_els_flush_all_cmd(struct lpfc_hba
*phba
)
6054 LIST_HEAD(completions
);
6055 struct lpfc_sli_ring
*pring
= &phba
->sli
.ring
[LPFC_ELS_RING
];
6056 struct lpfc_iocbq
*tmp_iocb
, *piocb
;
6059 lpfc_fabric_abort_hba(phba
);
6060 spin_lock_irq(&phba
->hbalock
);
6061 list_for_each_entry_safe(piocb
, tmp_iocb
, &pring
->txq
, list
) {
6063 if (piocb
->iocb_flag
& LPFC_IO_LIBDFC
)
6065 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6066 if (cmd
->ulpCommand
== CMD_QUE_RING_BUF_CN
||
6067 cmd
->ulpCommand
== CMD_QUE_RING_BUF64_CN
||
6068 cmd
->ulpCommand
== CMD_CLOSE_XRI_CN
||
6069 cmd
->ulpCommand
== CMD_ABORT_XRI_CN
)
6071 list_move_tail(&piocb
->list
, &completions
);
6074 list_for_each_entry_safe(piocb
, tmp_iocb
, &pring
->txcmplq
, list
) {
6075 if (piocb
->iocb_flag
& LPFC_IO_LIBDFC
)
6077 lpfc_sli_issue_abort_iotag(phba
, pring
, piocb
);
6079 spin_unlock_irq(&phba
->hbalock
);
6081 /* Cancel all the IOCBs from the completions list */
6082 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
6089 * lpfc_send_els_failure_event - Posts an ELS command failure event
6090 * @phba: Pointer to hba context object.
6091 * @cmdiocbp: Pointer to command iocb which reported error.
6092 * @rspiocbp: Pointer to response iocb which reported error.
6094 * This function sends an event when there is an ELS command
6098 lpfc_send_els_failure_event(struct lpfc_hba
*phba
,
6099 struct lpfc_iocbq
*cmdiocbp
,
6100 struct lpfc_iocbq
*rspiocbp
)
6102 struct lpfc_vport
*vport
= cmdiocbp
->vport
;
6103 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
6104 struct lpfc_lsrjt_event lsrjt_event
;
6105 struct lpfc_fabric_event_header fabric_event
;
6107 struct lpfc_nodelist
*ndlp
;
6110 ndlp
= cmdiocbp
->context1
;
6111 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
))
6114 if (rspiocbp
->iocb
.ulpStatus
== IOSTAT_LS_RJT
) {
6115 lsrjt_event
.header
.event_type
= FC_REG_ELS_EVENT
;
6116 lsrjt_event
.header
.subcategory
= LPFC_EVENT_LSRJT_RCV
;
6117 memcpy(lsrjt_event
.header
.wwpn
, &ndlp
->nlp_portname
,
6118 sizeof(struct lpfc_name
));
6119 memcpy(lsrjt_event
.header
.wwnn
, &ndlp
->nlp_nodename
,
6120 sizeof(struct lpfc_name
));
6121 pcmd
= (uint32_t *) (((struct lpfc_dmabuf
*)
6122 cmdiocbp
->context2
)->virt
);
6123 lsrjt_event
.command
= (pcmd
!= NULL
) ? *pcmd
: 0;
6124 stat
.un
.lsRjtError
= be32_to_cpu(rspiocbp
->iocb
.un
.ulpWord
[4]);
6125 lsrjt_event
.reason_code
= stat
.un
.b
.lsRjtRsnCode
;
6126 lsrjt_event
.explanation
= stat
.un
.b
.lsRjtRsnCodeExp
;
6127 fc_host_post_vendor_event(shost
,
6128 fc_get_event_number(),
6129 sizeof(lsrjt_event
),
6130 (char *)&lsrjt_event
,
6134 if ((rspiocbp
->iocb
.ulpStatus
== IOSTAT_NPORT_BSY
) ||
6135 (rspiocbp
->iocb
.ulpStatus
== IOSTAT_FABRIC_BSY
)) {
6136 fabric_event
.event_type
= FC_REG_FABRIC_EVENT
;
6137 if (rspiocbp
->iocb
.ulpStatus
== IOSTAT_NPORT_BSY
)
6138 fabric_event
.subcategory
= LPFC_EVENT_PORT_BUSY
;
6140 fabric_event
.subcategory
= LPFC_EVENT_FABRIC_BUSY
;
6141 memcpy(fabric_event
.wwpn
, &ndlp
->nlp_portname
,
6142 sizeof(struct lpfc_name
));
6143 memcpy(fabric_event
.wwnn
, &ndlp
->nlp_nodename
,
6144 sizeof(struct lpfc_name
));
6145 fc_host_post_vendor_event(shost
,
6146 fc_get_event_number(),
6147 sizeof(fabric_event
),
6148 (char *)&fabric_event
,
6156 * lpfc_send_els_event - Posts unsolicited els event
6157 * @vport: Pointer to vport object.
6158 * @ndlp: Pointer FC node object.
6159 * @cmd: ELS command code.
6161 * This function posts an event when there is an incoming
6162 * unsolicited ELS command.
6165 lpfc_send_els_event(struct lpfc_vport
*vport
,
6166 struct lpfc_nodelist
*ndlp
,
6169 struct lpfc_els_event_header
*els_data
= NULL
;
6170 struct lpfc_logo_event
*logo_data
= NULL
;
6171 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
6173 if (*payload
== ELS_CMD_LOGO
) {
6174 logo_data
= kmalloc(sizeof(struct lpfc_logo_event
), GFP_KERNEL
);
6176 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
6177 "0148 Failed to allocate memory "
6178 "for LOGO event\n");
6181 els_data
= &logo_data
->header
;
6183 els_data
= kmalloc(sizeof(struct lpfc_els_event_header
),
6186 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
6187 "0149 Failed to allocate memory "
6192 els_data
->event_type
= FC_REG_ELS_EVENT
;
6195 els_data
->subcategory
= LPFC_EVENT_PLOGI_RCV
;
6198 els_data
->subcategory
= LPFC_EVENT_PRLO_RCV
;
6201 els_data
->subcategory
= LPFC_EVENT_ADISC_RCV
;
6204 els_data
->subcategory
= LPFC_EVENT_LOGO_RCV
;
6205 /* Copy the WWPN in the LOGO payload */
6206 memcpy(logo_data
->logo_wwpn
, &payload
[2],
6207 sizeof(struct lpfc_name
));
6213 memcpy(els_data
->wwpn
, &ndlp
->nlp_portname
, sizeof(struct lpfc_name
));
6214 memcpy(els_data
->wwnn
, &ndlp
->nlp_nodename
, sizeof(struct lpfc_name
));
6215 if (*payload
== ELS_CMD_LOGO
) {
6216 fc_host_post_vendor_event(shost
,
6217 fc_get_event_number(),
6218 sizeof(struct lpfc_logo_event
),
6223 fc_host_post_vendor_event(shost
,
6224 fc_get_event_number(),
6225 sizeof(struct lpfc_els_event_header
),
6236 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
6237 * @phba: pointer to lpfc hba data structure.
6238 * @pring: pointer to a SLI ring.
6239 * @vport: pointer to a host virtual N_Port data structure.
6240 * @elsiocb: pointer to lpfc els command iocb data structure.
6242 * This routine is used for processing the IOCB associated with a unsolicited
6243 * event. It first determines whether there is an existing ndlp that matches
6244 * the DID from the unsolicited IOCB. If not, it will create a new one with
6245 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
6246 * IOCB is then used to invoke the proper routine and to set up proper state
6247 * of the discovery state machine.
6250 lpfc_els_unsol_buffer(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
6251 struct lpfc_vport
*vport
, struct lpfc_iocbq
*elsiocb
)
6253 struct Scsi_Host
*shost
;
6254 struct lpfc_nodelist
*ndlp
;
6257 uint32_t cmd
, did
, newnode
, rjt_err
= 0;
6258 IOCB_t
*icmd
= &elsiocb
->iocb
;
6260 if (!vport
|| !(elsiocb
->context2
))
6264 payload
= ((struct lpfc_dmabuf
*)elsiocb
->context2
)->virt
;
6266 if ((phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) == 0)
6267 lpfc_post_buffer(phba
, pring
, 1);
6269 did
= icmd
->un
.rcvels
.remoteID
;
6270 if (icmd
->ulpStatus
) {
6271 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6272 "RCV Unsol ELS: status:x%x/x%x did:x%x",
6273 icmd
->ulpStatus
, icmd
->un
.ulpWord
[4], did
);
6277 /* Check to see if link went down during discovery */
6278 if (lpfc_els_chk_latt(vport
))
6281 /* Ignore traffic received during vport shutdown. */
6282 if (vport
->load_flag
& FC_UNLOADING
)
6285 /* If NPort discovery is delayed drop incoming ELS */
6286 if ((vport
->fc_flag
& FC_DISC_DELAYED
) &&
6287 (cmd
!= ELS_CMD_PLOGI
))
6290 ndlp
= lpfc_findnode_did(vport
, did
);
6292 /* Cannot find existing Fabric ndlp, so allocate a new one */
6293 ndlp
= mempool_alloc(phba
->nlp_mem_pool
, GFP_KERNEL
);
6297 lpfc_nlp_init(vport
, ndlp
, did
);
6298 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
6300 if ((did
& Fabric_DID_MASK
) == Fabric_DID_MASK
)
6301 ndlp
->nlp_type
|= NLP_FABRIC
;
6302 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
6303 ndlp
= lpfc_enable_node(vport
, ndlp
,
6304 NLP_STE_UNUSED_NODE
);
6307 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
6309 if ((did
& Fabric_DID_MASK
) == Fabric_DID_MASK
)
6310 ndlp
->nlp_type
|= NLP_FABRIC
;
6311 } else if (ndlp
->nlp_state
== NLP_STE_UNUSED_NODE
) {
6312 /* This is similar to the new node path */
6313 ndlp
= lpfc_nlp_get(ndlp
);
6316 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
6320 phba
->fc_stat
.elsRcvFrame
++;
6322 elsiocb
->context1
= lpfc_nlp_get(ndlp
);
6323 elsiocb
->vport
= vport
;
6325 if ((cmd
& ELS_CMD_MASK
) == ELS_CMD_RSCN
) {
6326 cmd
&= ELS_CMD_MASK
;
6328 /* ELS command <elsCmd> received from NPORT <did> */
6329 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
6330 "0112 ELS command x%x received from NPORT x%x "
6331 "Data: x%x\n", cmd
, did
, vport
->port_state
);
6334 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6335 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
6336 did
, vport
->port_state
, ndlp
->nlp_flag
);
6338 phba
->fc_stat
.elsRcvPLOGI
++;
6339 ndlp
= lpfc_plogi_confirm_nport(phba
, payload
, ndlp
);
6341 lpfc_send_els_event(vport
, ndlp
, payload
);
6343 /* If Nport discovery is delayed, reject PLOGIs */
6344 if (vport
->fc_flag
& FC_DISC_DELAYED
) {
6345 rjt_err
= LSRJT_UNABLE_TPC
;
6348 if (vport
->port_state
< LPFC_DISC_AUTH
) {
6349 if (!(phba
->pport
->fc_flag
& FC_PT2PT
) ||
6350 (phba
->pport
->fc_flag
& FC_PT2PT_PLOGI
)) {
6351 rjt_err
= LSRJT_UNABLE_TPC
;
6354 /* We get here, and drop thru, if we are PT2PT with
6355 * another NPort and the other side has initiated
6356 * the PLOGI before responding to our FLOGI.
6360 shost
= lpfc_shost_from_vport(vport
);
6361 spin_lock_irq(shost
->host_lock
);
6362 ndlp
->nlp_flag
&= ~NLP_TARGET_REMOVE
;
6363 spin_unlock_irq(shost
->host_lock
);
6365 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
,
6370 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6371 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
6372 did
, vport
->port_state
, ndlp
->nlp_flag
);
6374 phba
->fc_stat
.elsRcvFLOGI
++;
6375 lpfc_els_rcv_flogi(vport
, elsiocb
, ndlp
);
6380 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6381 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
6382 did
, vport
->port_state
, ndlp
->nlp_flag
);
6384 phba
->fc_stat
.elsRcvLOGO
++;
6385 lpfc_send_els_event(vport
, ndlp
, payload
);
6386 if (vport
->port_state
< LPFC_DISC_AUTH
) {
6387 rjt_err
= LSRJT_UNABLE_TPC
;
6390 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
, NLP_EVT_RCV_LOGO
);
6393 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6394 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
6395 did
, vport
->port_state
, ndlp
->nlp_flag
);
6397 phba
->fc_stat
.elsRcvPRLO
++;
6398 lpfc_send_els_event(vport
, ndlp
, payload
);
6399 if (vport
->port_state
< LPFC_DISC_AUTH
) {
6400 rjt_err
= LSRJT_UNABLE_TPC
;
6403 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
, NLP_EVT_RCV_PRLO
);
6406 phba
->fc_stat
.elsRcvRSCN
++;
6407 lpfc_els_rcv_rscn(vport
, elsiocb
, ndlp
);
6412 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6413 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
6414 did
, vport
->port_state
, ndlp
->nlp_flag
);
6416 lpfc_send_els_event(vport
, ndlp
, payload
);
6417 phba
->fc_stat
.elsRcvADISC
++;
6418 if (vport
->port_state
< LPFC_DISC_AUTH
) {
6419 rjt_err
= LSRJT_UNABLE_TPC
;
6422 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
,
6426 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6427 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
6428 did
, vport
->port_state
, ndlp
->nlp_flag
);
6430 phba
->fc_stat
.elsRcvPDISC
++;
6431 if (vport
->port_state
< LPFC_DISC_AUTH
) {
6432 rjt_err
= LSRJT_UNABLE_TPC
;
6435 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
,
6439 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6440 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
6441 did
, vport
->port_state
, ndlp
->nlp_flag
);
6443 phba
->fc_stat
.elsRcvFARPR
++;
6444 lpfc_els_rcv_farpr(vport
, elsiocb
, ndlp
);
6447 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6448 "RCV FARP: did:x%x/ste:x%x flg:x%x",
6449 did
, vport
->port_state
, ndlp
->nlp_flag
);
6451 phba
->fc_stat
.elsRcvFARP
++;
6452 lpfc_els_rcv_farp(vport
, elsiocb
, ndlp
);
6455 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6456 "RCV FAN: did:x%x/ste:x%x flg:x%x",
6457 did
, vport
->port_state
, ndlp
->nlp_flag
);
6459 phba
->fc_stat
.elsRcvFAN
++;
6460 lpfc_els_rcv_fan(vport
, elsiocb
, ndlp
);
6463 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6464 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
6465 did
, vport
->port_state
, ndlp
->nlp_flag
);
6467 phba
->fc_stat
.elsRcvPRLI
++;
6468 if (vport
->port_state
< LPFC_DISC_AUTH
) {
6469 rjt_err
= LSRJT_UNABLE_TPC
;
6472 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
, NLP_EVT_RCV_PRLI
);
6475 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6476 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
6477 did
, vport
->port_state
, ndlp
->nlp_flag
);
6479 phba
->fc_stat
.elsRcvLIRR
++;
6480 lpfc_els_rcv_lirr(vport
, elsiocb
, ndlp
);
6485 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6486 "RCV RLS: did:x%x/ste:x%x flg:x%x",
6487 did
, vport
->port_state
, ndlp
->nlp_flag
);
6489 phba
->fc_stat
.elsRcvRLS
++;
6490 lpfc_els_rcv_rls(vport
, elsiocb
, ndlp
);
6495 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6496 "RCV RPS: did:x%x/ste:x%x flg:x%x",
6497 did
, vport
->port_state
, ndlp
->nlp_flag
);
6499 phba
->fc_stat
.elsRcvRPS
++;
6500 lpfc_els_rcv_rps(vport
, elsiocb
, ndlp
);
6505 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6506 "RCV RPL: did:x%x/ste:x%x flg:x%x",
6507 did
, vport
->port_state
, ndlp
->nlp_flag
);
6509 phba
->fc_stat
.elsRcvRPL
++;
6510 lpfc_els_rcv_rpl(vport
, elsiocb
, ndlp
);
6515 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6516 "RCV RNID: did:x%x/ste:x%x flg:x%x",
6517 did
, vport
->port_state
, ndlp
->nlp_flag
);
6519 phba
->fc_stat
.elsRcvRNID
++;
6520 lpfc_els_rcv_rnid(vport
, elsiocb
, ndlp
);
6525 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6526 "RCV RTV: did:x%x/ste:x%x flg:x%x",
6527 did
, vport
->port_state
, ndlp
->nlp_flag
);
6528 phba
->fc_stat
.elsRcvRTV
++;
6529 lpfc_els_rcv_rtv(vport
, elsiocb
, ndlp
);
6534 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6535 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
6536 did
, vport
->port_state
, ndlp
->nlp_flag
);
6538 phba
->fc_stat
.elsRcvRRQ
++;
6539 lpfc_els_rcv_rrq(vport
, elsiocb
, ndlp
);
6544 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6545 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
6546 did
, vport
->port_state
, ndlp
->nlp_flag
);
6548 phba
->fc_stat
.elsRcvECHO
++;
6549 lpfc_els_rcv_echo(vport
, elsiocb
, ndlp
);
6554 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6555 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
6556 cmd
, did
, vport
->port_state
);
6558 /* Unsupported ELS command, reject */
6559 rjt_err
= LSRJT_CMD_UNSUPPORTED
;
6561 /* Unknown ELS command <elsCmd> received from NPORT <did> */
6562 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
6563 "0115 Unknown ELS command x%x "
6564 "received from NPORT x%x\n", cmd
, did
);
6570 /* check if need to LS_RJT received ELS cmd */
6572 memset(&stat
, 0, sizeof(stat
));
6573 stat
.un
.b
.lsRjtRsnCode
= rjt_err
;
6574 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_NOTHING_MORE
;
6575 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, elsiocb
, ndlp
,
6579 lpfc_nlp_put(elsiocb
->context1
);
6580 elsiocb
->context1
= NULL
;
6584 if (vport
&& !(vport
->load_flag
& FC_UNLOADING
))
6585 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
6586 "0111 Dropping received ELS cmd "
6587 "Data: x%x x%x x%x\n",
6588 icmd
->ulpStatus
, icmd
->un
.ulpWord
[4], icmd
->ulpTimeout
);
6589 phba
->fc_stat
.elsRcvDrop
++;
6593 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
6594 * @phba: pointer to lpfc hba data structure.
6595 * @vpi: host virtual N_Port identifier.
6597 * This routine finds a vport on a HBA (referred by @phba) through a
6598 * @vpi. The function walks the HBA's vport list and returns the address
6599 * of the vport with the matching @vpi.
6602 * NULL - No vport with the matching @vpi found
6603 * Otherwise - Address to the vport with the matching @vpi.
6606 lpfc_find_vport_by_vpid(struct lpfc_hba
*phba
, uint16_t vpi
)
6608 struct lpfc_vport
*vport
;
6609 unsigned long flags
;
6612 /* The physical ports are always vpi 0 - translate is unnecessary. */
6615 * Translate the physical vpi to the logical vpi. The
6616 * vport stores the logical vpi.
6618 for (i
= 0; i
< phba
->max_vpi
; i
++) {
6619 if (vpi
== phba
->vpi_ids
[i
])
6623 if (i
>= phba
->max_vpi
) {
6624 lpfc_printf_log(phba
, KERN_ERR
, LOG_ELS
,
6625 "2936 Could not find Vport mapped "
6626 "to vpi %d\n", vpi
);
6631 spin_lock_irqsave(&phba
->hbalock
, flags
);
6632 list_for_each_entry(vport
, &phba
->port_list
, listentry
) {
6633 if (vport
->vpi
== i
) {
6634 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
6638 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
6643 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
6644 * @phba: pointer to lpfc hba data structure.
6645 * @pring: pointer to a SLI ring.
6646 * @elsiocb: pointer to lpfc els iocb data structure.
6648 * This routine is used to process an unsolicited event received from a SLI
6649 * (Service Level Interface) ring. The actual processing of the data buffer
6650 * associated with the unsolicited event is done by invoking the routine
6651 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the
6652 * SLI ring on which the unsolicited event was received.
6655 lpfc_els_unsol_event(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
6656 struct lpfc_iocbq
*elsiocb
)
6658 struct lpfc_vport
*vport
= phba
->pport
;
6659 IOCB_t
*icmd
= &elsiocb
->iocb
;
6661 struct lpfc_dmabuf
*bdeBuf1
= elsiocb
->context2
;
6662 struct lpfc_dmabuf
*bdeBuf2
= elsiocb
->context3
;
6664 elsiocb
->context1
= NULL
;
6665 elsiocb
->context2
= NULL
;
6666 elsiocb
->context3
= NULL
;
6668 if (icmd
->ulpStatus
== IOSTAT_NEED_BUFFER
) {
6669 lpfc_sli_hbqbuf_add_hbqs(phba
, LPFC_ELS_HBQ
);
6670 } else if (icmd
->ulpStatus
== IOSTAT_LOCAL_REJECT
&&
6671 (icmd
->un
.ulpWord
[4] & 0xff) == IOERR_RCV_BUFFER_WAITING
) {
6672 phba
->fc_stat
.NoRcvBuf
++;
6673 /* Not enough posted buffers; Try posting more buffers */
6674 if (!(phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
))
6675 lpfc_post_buffer(phba
, pring
, 0);
6679 if ((phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
) &&
6680 (icmd
->ulpCommand
== CMD_IOCB_RCV_ELS64_CX
||
6681 icmd
->ulpCommand
== CMD_IOCB_RCV_SEQ64_CX
)) {
6682 if (icmd
->unsli3
.rcvsli3
.vpi
== 0xffff)
6683 vport
= phba
->pport
;
6685 vport
= lpfc_find_vport_by_vpid(phba
,
6686 icmd
->unsli3
.rcvsli3
.vpi
);
6689 /* If there are no BDEs associated
6690 * with this IOCB, there is nothing to do.
6692 if (icmd
->ulpBdeCount
== 0)
6695 /* type of ELS cmd is first 32bit word
6698 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
6699 elsiocb
->context2
= bdeBuf1
;
6701 paddr
= getPaddr(icmd
->un
.cont64
[0].addrHigh
,
6702 icmd
->un
.cont64
[0].addrLow
);
6703 elsiocb
->context2
= lpfc_sli_ringpostbuf_get(phba
, pring
,
6707 lpfc_els_unsol_buffer(phba
, pring
, vport
, elsiocb
);
6709 * The different unsolicited event handlers would tell us
6710 * if they are done with "mp" by setting context2 to NULL.
6712 if (elsiocb
->context2
) {
6713 lpfc_in_buf_free(phba
, (struct lpfc_dmabuf
*)elsiocb
->context2
);
6714 elsiocb
->context2
= NULL
;
6717 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
6718 if ((phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) &&
6719 icmd
->ulpBdeCount
== 2) {
6720 elsiocb
->context2
= bdeBuf2
;
6721 lpfc_els_unsol_buffer(phba
, pring
, vport
, elsiocb
);
6722 /* free mp if we are done with it */
6723 if (elsiocb
->context2
) {
6724 lpfc_in_buf_free(phba
, elsiocb
->context2
);
6725 elsiocb
->context2
= NULL
;
6731 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
6732 * @phba: pointer to lpfc hba data structure.
6733 * @vport: pointer to a virtual N_Port data structure.
6735 * This routine issues a Port Login (PLOGI) to the Name Server with
6736 * State Change Request (SCR) for a @vport. This routine will create an
6737 * ndlp for the Name Server associated to the @vport if such node does
6738 * not already exist. The PLOGI to Name Server is issued by invoking the
6739 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
6740 * (FDMI) is configured to the @vport, a FDMI node will be created and
6741 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
6744 lpfc_do_scr_ns_plogi(struct lpfc_hba
*phba
, struct lpfc_vport
*vport
)
6746 struct lpfc_nodelist
*ndlp
, *ndlp_fdmi
;
6747 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
6750 * If lpfc_delay_discovery parameter is set and the clean address
6751 * bit is cleared and fc fabric parameters chenged, delay FC NPort
6754 spin_lock_irq(shost
->host_lock
);
6755 if (vport
->fc_flag
& FC_DISC_DELAYED
) {
6756 spin_unlock_irq(shost
->host_lock
);
6757 mod_timer(&vport
->delayed_disc_tmo
,
6758 jiffies
+ HZ
* phba
->fc_ratov
);
6761 spin_unlock_irq(shost
->host_lock
);
6763 ndlp
= lpfc_findnode_did(vport
, NameServer_DID
);
6765 ndlp
= mempool_alloc(phba
->nlp_mem_pool
, GFP_KERNEL
);
6767 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
6768 lpfc_disc_start(vport
);
6771 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
6772 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
6773 "0251 NameServer login: no memory\n");
6776 lpfc_nlp_init(vport
, ndlp
, NameServer_DID
);
6777 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
6778 ndlp
= lpfc_enable_node(vport
, ndlp
, NLP_STE_UNUSED_NODE
);
6780 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
6781 lpfc_disc_start(vport
);
6784 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
6785 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
6786 "0348 NameServer login: node freed\n");
6790 ndlp
->nlp_type
|= NLP_FABRIC
;
6792 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_PLOGI_ISSUE
);
6794 if (lpfc_issue_els_plogi(vport
, ndlp
->nlp_DID
, 0)) {
6795 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
6796 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
6797 "0252 Cannot issue NameServer login\n");
6801 if (vport
->cfg_fdmi_on
) {
6802 /* If this is the first time, allocate an ndlp and initialize
6803 * it. Otherwise, make sure the node is enabled and then do the
6806 ndlp_fdmi
= lpfc_findnode_did(vport
, FDMI_DID
);
6808 ndlp_fdmi
= mempool_alloc(phba
->nlp_mem_pool
,
6811 lpfc_nlp_init(vport
, ndlp_fdmi
, FDMI_DID
);
6812 ndlp_fdmi
->nlp_type
|= NLP_FABRIC
;
6816 if (!NLP_CHK_NODE_ACT(ndlp_fdmi
))
6817 ndlp_fdmi
= lpfc_enable_node(vport
,
6822 lpfc_nlp_set_state(vport
, ndlp_fdmi
,
6823 NLP_STE_PLOGI_ISSUE
);
6824 lpfc_issue_els_plogi(vport
, ndlp_fdmi
->nlp_DID
, 0);
6830 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
6831 * @phba: pointer to lpfc hba data structure.
6832 * @pmb: pointer to the driver internal queue element for mailbox command.
6834 * This routine is the completion callback function to register new vport
6835 * mailbox command. If the new vport mailbox command completes successfully,
6836 * the fabric registration login shall be performed on physical port (the
6837 * new vport created is actually a physical port, with VPI 0) or the port
6838 * login to Name Server for State Change Request (SCR) will be performed
6839 * on virtual port (real virtual port, with VPI greater than 0).
6842 lpfc_cmpl_reg_new_vport(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
6844 struct lpfc_vport
*vport
= pmb
->vport
;
6845 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
6846 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
6847 MAILBOX_t
*mb
= &pmb
->u
.mb
;
6850 spin_lock_irq(shost
->host_lock
);
6851 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
6852 spin_unlock_irq(shost
->host_lock
);
6854 if (mb
->mbxStatus
) {
6855 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
6856 "0915 Register VPI failed : Status: x%x"
6857 " upd bit: x%x \n", mb
->mbxStatus
,
6858 mb
->un
.varRegVpi
.upd
);
6859 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
6860 mb
->un
.varRegVpi
.upd
)
6861 goto mbox_err_exit
;
6863 switch (mb
->mbxStatus
) {
6864 case 0x11: /* unsupported feature */
6865 case 0x9603: /* max_vpi exceeded */
6866 case 0x9602: /* Link event since CLEAR_LA */
6867 /* giving up on vport registration */
6868 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
6869 spin_lock_irq(shost
->host_lock
);
6870 vport
->fc_flag
&= ~(FC_FABRIC
| FC_PUBLIC_LOOP
);
6871 spin_unlock_irq(shost
->host_lock
);
6872 lpfc_can_disctmo(vport
);
6874 /* If reg_vpi fail with invalid VPI status, re-init VPI */
6876 spin_lock_irq(shost
->host_lock
);
6877 vport
->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
6878 spin_unlock_irq(shost
->host_lock
);
6879 lpfc_init_vpi(phba
, pmb
, vport
->vpi
);
6881 pmb
->mbox_cmpl
= lpfc_init_vpi_cmpl
;
6882 rc
= lpfc_sli_issue_mbox(phba
, pmb
,
6884 if (rc
== MBX_NOT_FINISHED
) {
6885 lpfc_printf_vlog(vport
,
6887 "2732 Failed to issue INIT_VPI"
6888 " mailbox command\n");
6895 /* Try to recover from this error */
6896 if (phba
->sli_rev
== LPFC_SLI_REV4
)
6897 lpfc_sli4_unreg_all_rpis(vport
);
6898 lpfc_mbx_unreg_vpi(vport
);
6899 spin_lock_irq(shost
->host_lock
);
6900 vport
->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
6901 spin_unlock_irq(shost
->host_lock
);
6902 if (vport
->port_type
== LPFC_PHYSICAL_PORT
6903 && !(vport
->fc_flag
& FC_LOGO_RCVD_DID_CHNG
))
6904 lpfc_issue_init_vfi(vport
);
6906 lpfc_initial_fdisc(vport
);
6910 spin_lock_irq(shost
->host_lock
);
6911 vport
->vpi_state
|= LPFC_VPI_REGISTERED
;
6912 spin_unlock_irq(shost
->host_lock
);
6913 if (vport
== phba
->pport
) {
6914 if (phba
->sli_rev
< LPFC_SLI_REV4
)
6915 lpfc_issue_fabric_reglogin(vport
);
6918 * If the physical port is instantiated using
6919 * FDISC, do not start vport discovery.
6921 if (vport
->port_state
!= LPFC_FDISC
)
6922 lpfc_start_fdiscs(phba
);
6923 lpfc_do_scr_ns_plogi(phba
, vport
);
6926 lpfc_do_scr_ns_plogi(phba
, vport
);
6929 /* Now, we decrement the ndlp reference count held for this
6934 mempool_free(pmb
, phba
->mbox_mem_pool
);
6939 * lpfc_register_new_vport - Register a new vport with a HBA
6940 * @phba: pointer to lpfc hba data structure.
6941 * @vport: pointer to a host virtual N_Port data structure.
6942 * @ndlp: pointer to a node-list data structure.
6944 * This routine registers the @vport as a new virtual port with a HBA.
6945 * It is done through a registering vpi mailbox command.
6948 lpfc_register_new_vport(struct lpfc_hba
*phba
, struct lpfc_vport
*vport
,
6949 struct lpfc_nodelist
*ndlp
)
6951 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
6954 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
6956 lpfc_reg_vpi(vport
, mbox
);
6957 mbox
->vport
= vport
;
6958 mbox
->context2
= lpfc_nlp_get(ndlp
);
6959 mbox
->mbox_cmpl
= lpfc_cmpl_reg_new_vport
;
6960 if (lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
)
6961 == MBX_NOT_FINISHED
) {
6962 /* mailbox command not success, decrement ndlp
6963 * reference count for this command
6966 mempool_free(mbox
, phba
->mbox_mem_pool
);
6968 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
6969 "0253 Register VPI: Can't send mbox\n");
6973 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
6974 "0254 Register VPI: no memory\n");
6980 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
6981 spin_lock_irq(shost
->host_lock
);
6982 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
6983 spin_unlock_irq(shost
->host_lock
);
6988 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
6989 * @phba: pointer to lpfc hba data structure.
6991 * This routine cancels the retry delay timers to all the vports.
6994 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba
*phba
)
6996 struct lpfc_vport
**vports
;
6997 struct lpfc_nodelist
*ndlp
;
6998 uint32_t link_state
;
7001 /* Treat this failure as linkdown for all vports */
7002 link_state
= phba
->link_state
;
7003 lpfc_linkdown(phba
);
7004 phba
->link_state
= link_state
;
7006 vports
= lpfc_create_vport_work_array(phba
);
7009 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
7010 ndlp
= lpfc_findnode_did(vports
[i
], Fabric_DID
);
7012 lpfc_cancel_retry_delay_tmo(vports
[i
], ndlp
);
7013 lpfc_els_flush_cmd(vports
[i
]);
7015 lpfc_destroy_vport_work_array(phba
, vports
);
7020 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
7021 * @phba: pointer to lpfc hba data structure.
7023 * This routine abort all pending discovery commands and
7024 * start a timer to retry FLOGI for the physical port
7028 lpfc_retry_pport_discovery(struct lpfc_hba
*phba
)
7030 struct lpfc_nodelist
*ndlp
;
7031 struct Scsi_Host
*shost
;
7033 /* Cancel the all vports retry delay retry timers */
7034 lpfc_cancel_all_vport_retry_delay_timer(phba
);
7036 /* If fabric require FLOGI, then re-instantiate physical login */
7037 ndlp
= lpfc_findnode_did(phba
->pport
, Fabric_DID
);
7041 shost
= lpfc_shost_from_vport(phba
->pport
);
7042 mod_timer(&ndlp
->nlp_delayfunc
, jiffies
+ HZ
);
7043 spin_lock_irq(shost
->host_lock
);
7044 ndlp
->nlp_flag
|= NLP_DELAY_TMO
;
7045 spin_unlock_irq(shost
->host_lock
);
7046 ndlp
->nlp_last_elscmd
= ELS_CMD_FLOGI
;
7047 phba
->pport
->port_state
= LPFC_FLOGI
;
7052 * lpfc_fabric_login_reqd - Check if FLOGI required.
7053 * @phba: pointer to lpfc hba data structure.
7054 * @cmdiocb: pointer to FDISC command iocb.
7055 * @rspiocb: pointer to FDISC response iocb.
7057 * This routine checks if a FLOGI is reguired for FDISC
7061 lpfc_fabric_login_reqd(struct lpfc_hba
*phba
,
7062 struct lpfc_iocbq
*cmdiocb
,
7063 struct lpfc_iocbq
*rspiocb
)
7066 if ((rspiocb
->iocb
.ulpStatus
!= IOSTAT_FABRIC_RJT
) ||
7067 (rspiocb
->iocb
.un
.ulpWord
[4] != RJT_LOGIN_REQUIRED
))

/**
 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function to a Fabric Discover
 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
 * single threaded, each FDISC completion callback function will reset
 * the discovery timer for all vports so that the timers do not time out
 * unnecessarily. The function checks the FDISC IOCB status. If an error is
 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the
 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
 * assigned to the vport has been changed with the completion of the FDISC
 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
 * are unregistered from the HBA, and then the lpfc_register_new_vport()
 * routine is invoked to register the new vport with the HBA. Otherwise, the
 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
 * Server for State Change Request (SCR).
 **/
static void
lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_iocbq *piocb;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint8_t fabric_param_changed;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
			 irsp->ulpStatus, irsp->un.ulpWord[4],
			 vport->fc_prevDID);
	/* Since all FDISCs are being single threaded, we
	 * must reset the discovery timer for ALL vports
	 * waiting to send FDISC when one completes.
	 */
	list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
		lpfc_set_disctmo(piocb->vport);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FDISC cmpl: status:x%x/x%x prevdid:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);

	if (irsp->ulpStatus) {

		if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
			lpfc_retry_pport_discovery(phba);
			goto out;
		}

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;
		/* FDISC failed */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0126 FDISC failed. (%d/%d)\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4]);
		goto fdisc_failed;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	vport->fc_flag |= FC_FABRIC;
	if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
		vport->fc_flag |= FC_PUBLIC_LOOP;
	spin_unlock_irq(shost->host_lock);

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	sp = prsp->virt + sizeof(uint32_t);
	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed so we can
		 * issue unreg_vpi.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp) ||
			    (np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);

		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		if (phba->sli_rev == LPFC_SLI_REV4)
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		else
			vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_register_new_vport(phba, vport, ndlp);
		goto out;
	}

	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
		lpfc_issue_init_vpi(vport);
	else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
		lpfc_register_new_vport(phba, vport, ndlp);
	else
		lpfc_do_scr_ns_plogi(phba, vport);
	goto out;
fdisc_failed:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	/* Cancel discovery timer */
	lpfc_can_disctmo(vport);
	lpfc_nlp_put(ndlp);
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
 * @vport: pointer to a virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
 * routine to issue the IOCB, which makes sure only one outstanding fabric
 * IOCB will be sent off the HBA at any given time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FDISC ELS command.
 *
 * Return code
 *   0 - Successfully issued fdisc iocb command
 *   1 - Failed to issue fdisc iocb command
 **/
static int
lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct serv_parm *sp;
	uint8_t *pcmd;
	uint16_t cmdsize;
	int did = ndlp->nlp_DID;
	int rc;

	vport->port_state = LPFC_FDISC;
	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
				     ELS_CMD_FDISC);
	if (!elsiocb) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0255 Issue FDISC: no IOCB\n");
		return 1;
	}

	icmd = &elsiocb->iocb;
	icmd->un.elsreq64.myID = 0;
	icmd->un.elsreq64.fl = 1;

	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	     LPFC_SLI_INTF_IF_TYPE_0)) {
		/* FDISC needs to be 1 for WQE VPI */
		elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
		elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1;
		/* Set the ulpContext to the vpi */
		elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
	} else {
		/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
		icmd->ulpCt_h = 1;
		icmd->ulpCt_l = 0;
	}

	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
	pcmd += sizeof(uint32_t); /* CSP Word 1 */
	memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;
	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cls1.classValid = 0;
	sp->cls2.seqDelivery = 1;
	sp->cls3.seqDelivery = 1;

	pcmd += sizeof(uint32_t); /* CSP Word 2 */
	pcmd += sizeof(uint32_t); /* CSP Word 3 */
	pcmd += sizeof(uint32_t); /* CSP Word 4 */
	pcmd += sizeof(uint32_t); /* Port Name */
	memcpy(pcmd, &vport->fc_portname, 8);
	pcmd += sizeof(uint32_t); /* Node Name */
	pcmd += sizeof(uint32_t); /* Node Name */
	memcpy(pcmd, &vport->fc_nodename, 8);

	lpfc_set_disctmo(vport);

	phba->fc_stat.elsXmitFDISC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FDISC: did:x%x",
		did, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0256 Issue FDISC: Cannot send IOCB\n");
		return 1;
	}
	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
	return 0;
}
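
/*
 * Illustrative sketch, excluded from the build: a hypothetical caller that
 * walks a NULL-terminated vport array and kicks off FDISC on each one. Real
 * callers live in the discovery state machine in other files; only the
 * calling convention (vport, fabric ndlp, retry count) is shown here.
 * lpfc_findnode_did() and Fabric_DID are assumed to come from elsewhere in
 * the driver.
 */
#if 0
static void
example_start_npiv_fdiscs(struct lpfc_vport **vports)
{
	struct lpfc_nodelist *ndlp;
	int i;

	for (i = 0; vports != NULL && vports[i] != NULL; i++) {
		/* Each vport logs into the fabric through its own ndlp */
		ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
		if (ndlp)
			(void) lpfc_issue_els_fdisc(vports[i], ndlp, 0);
	}
}
#endif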

/**
 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function to the issuing of a LOGO
 * ELS command off a vport. It frees the command IOCB and then decrements the
 * reference count held on ndlp for this completion function, indicating that
 * the reference to the ndlp is no longer needed. Note that the
 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
 * callback function and an additional explicit ndlp reference decrement
 * will trigger the actual release of the ndlp.
 **/
static void
lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
	irsp = &rspiocb->iocb;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"LOGO npiv cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);

	lpfc_els_free_iocb(phba, cmdiocb);
	vport->unreg_vpi_cmpl = VPORT_ERROR;

	/* Trigger the release of the ndlp after logo */
	lpfc_nlp_put(ndlp);

	/* NPIV LOGO completes to NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "2928 NPIV LOGO completes to NPort x%x "
			 "Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
			 irsp->ulpTimeout, vport->num_disc_nodes);

	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_FABRIC;
		spin_unlock_irq(shost->host_lock);
	}
}

/**
 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
 * @vport: pointer to a virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the LOGO ELS command.
 *
 * Return codes
 *   0 - Successfully issued logo off the @vport
 *   1 - Failed to issue logo off the @vport
 **/
int
lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize;

	cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
				     ELS_CMD_LOGO);
	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
	pcmd += sizeof(uint32_t);

	/* Fill in LOGO payload */
	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue LOGO npiv did:x%x flg:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, 0);

	elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_SND;
	spin_unlock_irq(shost->host_lock);
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
	    IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_LOGO_SND;
		spin_unlock_irq(shost->host_lock);
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
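
/*
 * Illustrative sketch, excluded from the build: the LOGO payload built above
 * is simply the ELS command word followed by the local N_Port ID and the
 * local WWPN, which is exactly what the cmdsize computation accounts for.
 * The struct below is hypothetical (the driver assembles the frame by
 * advancing a byte pointer instead); it only documents that layout.
 */
#if 0
struct example_logo_payload {
	uint32_t els_cmd;		/* ELS_CMD_LOGO */
	uint32_t nport_id;		/* written from vport->fc_myDID above */
	struct lpfc_name portname;	/* vport->fc_portname (WWPN) */
};
#endif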

/**
 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
 * @ptr: holder for the timer function associated data.
 *
 * This routine is invoked by the fabric iocb block timer after
 * timeout. It posts the fabric iocb block timeout event by setting the
 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
 * the lpfc_worker_wake_up() routine to wake up the worker thread. It is for
 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() routine on the
 * posted event WORKER_FABRIC_BLOCK_TMO.
 **/
void
lpfc_fabric_block_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
	unsigned long iflags;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
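
/*
 * Illustrative sketch, excluded from the build: the fabric block timer is
 * armed in the driver's initialization code (in another file) with this
 * routine as its handler and the phba pointer as its data word, roughly as
 * below. The exact init-time code is an assumption made only to show how
 * @ptr ends up holding the phba pointer.
 */
#if 0
static void
example_setup_fabric_block_timer(struct lpfc_hba *phba)
{
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
}
#endif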

/**
 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues one fabric iocb from the driver internal list to
 * the HBA. It first checks whether it is ready to issue one fabric iocb to
 * the HBA (i.e. there is no outstanding fabric iocb). If so, it removes one
 * pending fabric iocb from the driver internal list and invokes the
 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
 **/
static void
lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocb;
	unsigned long iflags;
	int ret;
	IOCB_t *cmd;

repeat:
	iocb = NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Post any pending iocb to the SLI layer */
	if (atomic_read(&phba->fabric_iocb_count) == 0) {
		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
				 list);
		if (iocb)
			/* Increment fabric iocb count to hold the position */
			atomic_inc(&phba->fabric_iocb_count);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (iocb) {
		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
		iocb->iocb_flag |= LPFC_IO_FABRIC;

		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
			"Fabric sched1: ste:x%x",
			iocb->vport->port_state, 0, 0);

		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);

		if (ret == IOCB_ERROR) {
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
			iocb->fabric_iocb_cmpl = NULL;
			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
			cmd = &iocb->iocb;
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			iocb->iocb_cmpl(phba, iocb, iocb);

			atomic_dec(&phba->fabric_iocb_count);
			goto repeat;
		}
	}

	return;
}

/**
 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine unblocks the issuing of fabric iocb commands. The function
 * clears the fabric iocb block bit and then invokes the routine
 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
 * from the driver internal fabric iocb list.
 **/
void
lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
{
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	lpfc_resume_fabric_iocbs(phba);
	return;
}

/**
 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine blocks the issuing of fabric iocbs for a specified amount of
 * time (currently 100 ms). This is done by setting the fabric iocb block bit
 * and setting up a timeout timer for 100 ms. While the block bit is set, no
 * more fabric iocbs will be issued out of the HBA.
 **/
static void
lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
{
	int blocked;

	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	/* Start a timer to unblock fabric iocbs after 100ms */
	if (!blocked)
		mod_timer(&phba->fabric_block_timer, jiffies + HZ/10);

	return;
}
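
/*
 * Illustrative sketch, excluded from the build: HZ/10 is the jiffies
 * equivalent of 100 ms, so the mod_timer() call above could equivalently be
 * written with msecs_to_jiffies(). This is only an observation for clarity,
 * not a proposed change to the driver.
 */
#if 0
static void
example_block_window(struct lpfc_hba *phba)
{
	mod_timer(&phba->fabric_block_timer,
		  jiffies + msecs_to_jiffies(100));
}
#endif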

/**
 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the callback function installed as the fabric iocb's
 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
 * function first restores and invokes the original iocb's callback function
 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
 **/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	struct lpfc_iocbq *rspiocb)
{
	struct ls_rjt stat;

	if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
		BUG();

	switch (rspiocb->iocb.ulpStatus) {
		case IOSTAT_NPORT_RJT:
		case IOSTAT_FABRIC_RJT:
			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
				lpfc_block_fabric_iocbs(phba);
			}
			break;

		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			lpfc_block_fabric_iocbs(phba);
			break;

		case IOSTAT_LS_RJT:
			stat.un.lsRjtError =
				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
			    (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
				lpfc_block_fabric_iocbs(phba);
			break;
	}

	if (atomic_read(&phba->fabric_iocb_count) == 0)
		BUG();

	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
	cmdiocb->fabric_iocb_cmpl = NULL;
	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);

	atomic_dec(&phba->fabric_iocb_count);
	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
		/* Post any pending iocbs to HBA */
		lpfc_resume_fabric_iocbs(phba);
	}
}
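
/*
 * Illustrative sketch, excluded from the build: for an LS_RJT completion the
 * reason code is recovered by byte-swapping IOCB word 4 into the ls_rjt
 * union, exactly as the switch statement above does. The hypothetical helper
 * below only isolates that decode step; it is not part of the driver.
 */
#if 0
static uint8_t
example_ls_rjt_reason(struct lpfc_iocbq *rspiocb)
{
	struct ls_rjt stat;

	stat.un.lsRjtError = be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
	return stat.un.b.lsRjtRsnCode;
}
#endif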

/**
 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 * @iocb: pointer to lpfc command iocb data structure.
 *
 * This routine is used as the top-level API for issuing a fabric iocb command
 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
 * function makes sure that only one fabric bound iocb will be outstanding at
 * any given time. As such, this function will first check to see whether there
 * is already an outstanding fabric iocb on the wire. If so, it will put the
 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
 * issued later. Otherwise, it will issue the iocb on the wire and update the
 * fabric iocb count to indicate that there is one fabric iocb on the wire.
 *
 * Note, this implementation can potentially send fabric IOCBs out of order:
 * the "ready" boolean does not include the condition that the internal fabric
 * IOCB list is empty, so a fabric IOCB issued by this routine might jump
 * ahead of the fabric IOCBs already waiting on the internal list.
 *
 * Return code
 *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
 *   IOCB_ERROR - failed to issue fabric iocb
 **/
static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
	unsigned long iflags;
	int ready;
	int ret;

	if (atomic_read(&phba->fabric_iocb_count) > 1)
		BUG();

	spin_lock_irqsave(&phba->hbalock, iflags);
	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	if (ready)
		/* Increment fabric iocb count to hold the position */
		atomic_inc(&phba->fabric_iocb_count);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (ready) {
		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
		iocb->iocb_flag |= LPFC_IO_FABRIC;

		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
			"Fabric sched2: ste:x%x",
			iocb->vport->port_state, 0, 0);

		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);

		if (ret == IOCB_ERROR) {
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
			iocb->fabric_iocb_cmpl = NULL;
			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
			atomic_dec(&phba->fabric_iocb_count);
		}
	} else {
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		ret = IOCB_SUCCESS;
	}
	return ret;
}
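
/*
 * Illustrative sketch, excluded from the build: one way to close the ordering
 * gap described in the comment block above would be to fold "internal list is
 * empty" into the readiness test, so a newly issued fabric IOCB can never
 * bypass ones already queued. This is a hypothetical variant for discussion,
 * not the driver's current behavior. The caller would hold phba->hbalock, as
 * the code above does around the same test.
 */
#if 0
static int
example_fabric_ready(struct lpfc_hba *phba)
{
	/* Caller holds phba->hbalock */
	return atomic_read(&phba->fabric_iocb_count) == 0 &&
	       list_empty(&phba->fabric_iocb_list) &&
	       !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
}
#endif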

/**
 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine aborts all the IOCBs associated with a @vport from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @vport off the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
 **/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {

		if (piocb->vport != vport)
			continue;

		list_move_tail(&piocb->list, &completions);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine aborts all the IOCBs associated with an @ndlp from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @ndlp off the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
 **/
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = ndlp->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {
		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {

			list_move_tail(&piocb->list, &completions);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the IOCBs currently on the driver internal
 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
 * list, removes IOCBs off the list, sets the status field to
 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
 * each IOCB.
 **/
void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->fabric_iocb_list, &completions);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
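
/*
 * Illustrative sketch, excluded from the build: a hypothetical teardown path
 * showing the relative granularity of the three abort helpers in this file
 * (one node, one vport, or the whole HBA). Real reset and unload paths call
 * these from other files; only the calling convention is shown here.
 */
#if 0
static void
example_flush_fabric_work(struct lpfc_hba *phba, struct lpfc_vport *vport,
			  struct lpfc_nodelist *ndlp)
{
	lpfc_fabric_abort_nport(ndlp);	/* drop IOCBs queued for one node */
	lpfc_fabric_abort_vport(vport);	/* drop IOCBs queued for one vport */
	lpfc_fabric_abort_hba(phba);	/* drop everything on the fabric list */
}
#endif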

/**
 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry_safe(sglq_entry, sglq_next,
			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
			sglq_entry->ndlp = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

/**
 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the els xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 slow-path
 * ELS aborted xri.
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	uint16_t lxri = 0;

	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry_safe(sglq_entry, sglq_next,
			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->sli4_xritag == xri) {
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			list_add_tail(&sglq_entry->list,
				&phba->sli4_hba.lpfc_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
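
/*
 * Illustrative sketch, excluded from the build: the two sglq states touched
 * above, rendered as strings for a hypothetical debug print. Only SGL_FREED
 * (the sglq went back on lpfc_sgl_list) and SGL_XRI_ABORTED (the sglq is
 * still owned by an active IOCB) are established by this function; any other
 * value is reported generically. This helper is not part of the driver.
 */
#if 0
static const char *
example_sglq_state_name(int state)
{
	switch (state) {
	case SGL_FREED:
		return "freed (returned to lpfc_sgl_list)";
	case SGL_XRI_ABORTED:
		return "aborted (still held by an active IOCB)";
	default:
		return "other";
	}
}
#endif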