/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
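/* Note: the table above enumerates the 126 valid FC-AL arbitrated loop
 * physical addresses (AL_PAs) in descending order; scan-down binding walks
 * it so that SCSI target IDs map deterministically to loop addresses.
 */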
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);
static int
lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
{
	if (ndlp->nlp_fc4_type ||
	    ndlp->nlp_type & NLP_FABRIC)
		return 1;
	return 0;
}
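/* Note: a node qualifies for transport registration only once it reports an
 * FC4 type or is a fabric entity; callers typically gate rport/remoteport
 * registration on this helper so unidentified nodes are skipped.
 */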
/* The source of a terminate rport I/O is either a dev_loss_tmo
 * event or a call to fc_remove_host.  While the rport should be
 * valid during these downcalls, the transport can call twice
 * in a single event.  This routine provides some protection
 * as the NDLP isn't really free, just released to the pool.
 */
static int
lpfc_rport_invalid(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;

	if (!rport) {
		pr_err("**** %s: NULL rport, exit.\n", __func__);
		return -EINVAL;
	}

	if (rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) {
		pr_info("**** %s: devloss_callbk_done rport x%px SID x%x\n",
			__func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	rdata = rport->dd_data;
	if (!rdata) {
		pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
		       __func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	ndlp = rdata->pnode;
	if (!ndlp) {
		pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
			__func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	if (!ndlp->vport) {
		pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
		       "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
		       rport->scsi_target_id);
		return -EINVAL;
	}
	return 0;
}
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;

	if (lpfc_rport_invalid(rport))
		return;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	vport = ndlp->vport;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport terminate: sid:x%x did:x%x flg:x%lx",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID)
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
static void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	unsigned long iflags;
	bool nvme_reg = false;

	ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
	if (!ndlp)
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosscb: sid:x%x did:x%x flg:x%lx",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%lx "
			 "load_flag x%lx refcnt %u state %d xpt x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
			 vport->load_flag, kref_read(&ndlp->kref),
			 ndlp->nlp_state, ndlp->fc4_xpt_flags);

	/* Don't schedule a worker thread event if the vport is going down. */
	if (test_bit(FC_UNLOADING, &vport->load_flag) ||
	    !test_bit(HBA_SETUP, &phba->hba_flag)) {

		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->rport = NULL;

		if (ndlp->fc4_xpt_flags & NVME_XPT_REGD)
			nvme_reg = true;

		/* The scsi_transport is done with the rport so lpfc cannot
		 * call to unregister.
		 */
		if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
			ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;

			/* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
			 * unregister calls were made to the scsi and nvme
			 * transports and refcnt was already decremented. Clear
			 * the NLP_XPT_REGD flag only if the NVME Rport is
			 * confirmed unregistered.
			 */
			if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
				ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
				spin_unlock_irqrestore(&ndlp->lock, iflags);
				lpfc_nlp_put(ndlp); /* may free ndlp */
			} else {
				spin_unlock_irqrestore(&ndlp->lock, iflags);
			}
		} else {
			spin_unlock_irqrestore(&ndlp->lock, iflags);
		}

		/* Only 1 thread can drop the initial node reference.  If
		 * another thread has set NLP_DROPPED, this thread is done.
		 */
		if (nvme_reg || test_bit(NLP_DROPPED, &ndlp->nlp_flag))
			return;

		set_bit(NLP_DROPPED, &ndlp->nlp_flag);
		lpfc_nlp_put(ndlp);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	/* check for recovered fabric node */
	if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
	    ndlp->nlp_DID == Fabric_DID)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6790 rport name %llx dev_loss_evt pending\n",
				 rport->port_name);
		return;
	}

	set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);

	spin_lock_irqsave(&ndlp->lock, iflags);
	/* If there is a PLOGI in progress, and we are in a
	 * NLP_NPR_2B_DISC state, don't turn off the flag.
	 */
	if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
		clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);

	/*
	 * The backend does not expect any more calls associated with this
	 * rport. Remove the association between rport and ndlp.
	 */
	ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
	((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
	ndlp->rport = NULL;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	if (phba->worker_thread) {
		/* We need to hold the node by incrementing the reference
		 * count until this queued work is done
		 */
		evtp->evt_arg1 = lpfc_nlp_get(ndlp);

		spin_lock_irqsave(&phba->hbalock, iflags);
		if (evtp->evt_arg1) {
			evtp->evt = LPFC_EVT_DEV_LOSS;
			list_add_tail(&evtp->evt_listp, &phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			lpfc_worker_wake_up(phba);
			return;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	} else {
		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
				 "3188 worker thread is stopped %s x%06x, "
				 " rport x%px flg x%lx load_flag x%lx refcnt "
				 "%d\n", __func__, ndlp->nlp_DID,
				 ndlp->rport, ndlp->nlp_flag,
				 vport->load_flag, kref_read(&ndlp->kref));
		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
			/* Node is in dev loss.  No further transaction. */
			clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RM);
		}
	}
}
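/* Note: the devloss callback deliberately does little in transport context.
 * The real teardown is deferred by queueing an LPFC_EVT_DEV_LOSS work item
 * that the worker thread later routes to lpfc_dev_loss_tmo_handler(); the
 * lpfc_nlp_get() reference taken above is dropped in lpfc_work_list_done()
 * after that handler returns.
 */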
/**
 * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport
 * @vport: Pointer to vport context object.
 *
 * This function checks for idle VMID entries related to a particular vport. If
 * found unused/idle, free them accordingly.
 **/
static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
{
	u16 keep;
	u32 difftime = 0, r, bucket;
	u64 *lta;
	int cpu;
	struct lpfc_vmid *vmp;

	write_lock(&vport->vmid_lock);

	if (!vport->cur_vmid_cnt)
		goto out;

	/* iterate through the table */
	hash_for_each(vport->hash_table, bucket, vmp, hnode) {
		keep = 0;
		if (vmp->flag & LPFC_VMID_REGISTERED) {
			/* check if the particular VMID is in use */
			/* for all available per cpu variable */
			for_each_possible_cpu(cpu) {
				/* if last access time is less than timeout */
				lta = per_cpu_ptr(vmp->last_io_time, cpu);
				if (!lta)
					continue;
				difftime = (jiffies) - (*lta);
				if ((vport->vmid_inactivity_timeout *
				     JIFFIES_PER_HR) > difftime) {
					keep = 1;
					break;
				}
			}

			/* if none of the cpus have been used by the vm, */
			/* remove the entry if already registered */
			if (!keep) {
				/* mark the entry for deregistration */
				vmp->flag = LPFC_VMID_DE_REGISTER;
				write_unlock(&vport->vmid_lock);
				if (vport->vmid_priority_tagging)
					r = lpfc_vmid_uvem(vport, vmp, false);
				else
					r = lpfc_vmid_cmd(vport,
							  SLI_CTAS_DAPP_IDENT,
							  vmp);

				/* decrement number of active vms and mark */
				/* entry in slot as free */
				write_lock(&vport->vmid_lock);
				if (!r) {
					struct lpfc_vmid *ht = vmp;

					vport->cur_vmid_cnt--;
					ht->flag = LPFC_VMID_SLOT_FREE;
					free_percpu(ht->last_io_time);
					ht->last_io_time = NULL;
					hash_del(&ht->hnode);
				}
			}
		}
	}
 out:
	write_unlock(&vport->vmid_lock);
}
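/* Note on the inactivity math above: each VMID keeps a per-CPU last-I/O
 * timestamp in jiffies, so an entry is kept alive when any CPU satisfies
 * (jiffies - *lta) < vmid_inactivity_timeout * JIFFIES_PER_HR; only when
 * every CPU is stale is the VMID deregistered and its slot freed.
 */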
/**
 * lpfc_check_inactive_vmid - VMID inactivity checker
 * @phba: Pointer to hba context object.
 *
 * This function is called from the worker thread to determine if an entry in
 * the VMID table can be released since there was no I/O activity seen from
 * that particular VM for the specified time. When this happens, the entry in
 * the table is released and also the resources on the switch cleared.
 **/
static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	for (i = 0; i <= phba->max_vports; i++) {
		if ((!vports[i]) && (i == 0))
			vport = phba->pport;
		else
			vport = vports[i];
		if (!vport)
			break;

		lpfc_check_inactive_vmid_one(vport);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to remote node object.
 *
 * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of
 * node during dev_loss_tmo processing, then this function restores the nlp_put
 * kref decrement from lpfc_dev_loss_tmo_handler.
 **/
void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	spin_lock_irqsave(&ndlp->lock, iflags);
	if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
		ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		lpfc_nlp_get(ndlp);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
				 "8438 Devloss timeout reversed on DID x%x "
				 "refcnt %d ndlp %p flag x%lx "
				 "port_state = x%x\n",
				 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
				 ndlp->nlp_flag, vport->port_state);
		return;
	}
	spin_unlock_irqrestore(&ndlp->lock, iflags);
}
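/* Note: this pairs with lpfc_dev_loss_tmo_handler(), which sets
 * NLP_IN_RECOV_POST_DEV_LOSS on fabric nodes caught mid-recovery; the
 * lpfc_nlp_get() above restores the reference that the devloss path retired,
 * keeping the node kref balanced across the recovery window.
 */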
/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when devloss timeout timer
 * expires. For SLI4 host, this routine shall return 1 when at least one
 * remote node, including this @ndlp, is still in use of FCF; otherwise, this
 * routine shall return 0 when no remote node is still in use of FCF when
 * devloss timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	uint8_t *name;
	int warn_on = 0;
	int fcf_inuse = 0;
	bool recovering = false;
	struct fc_vport *fc_vport = NULL;
	unsigned long iflags;

	vport = ndlp->vport;
	name = (uint8_t *)&ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosstmo:did:x%x type:x%x id:x%x",
			      ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 %s x%06x, nflag x%lx xflags x%x refcnt %d\n",
			 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));

	/* If the driver is recovering the rport, ignore devloss. */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);

		clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
		return fcf_inuse;
	}

	/* Fabric nodes are done. */
	if (ndlp->nlp_type & NLP_FABRIC) {
		spin_lock_irqsave(&ndlp->lock, iflags);

		/* The driver has to account for a race between any fabric
		 * node that's in recovery when dev_loss_tmo expires. When this
		 * happens, the driver has to allow node recovery.
		 */
		switch (ndlp->nlp_DID) {
		case Fabric_DID:
			fc_vport = vport->fc_vport;
			if (fc_vport) {
				/* NPIV path. */
				if (fc_vport->vport_state ==
				    FC_VPORT_INITIALIZING)
					recovering = true;
			} else {
				/* Physical port path. */
				if (test_bit(HBA_FLOGI_OUTSTANDING,
					     &phba->hba_flag))
					recovering = true;
			}
			break;
		case Fabric_Cntl_DID:
			if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
				recovering = true;
			break;
		case FDMI_DID:
			fallthrough;
		case NameServer_DID:
			if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
			    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
				recovering = true;
			break;
		default:
			/* Ensure the nlp_DID at least has the correct prefix.
			 * The fabric domain controller's last three nibbles
			 * vary so we handle it in the default case.
			 */
			if (ndlp->nlp_DID & Fabric_DID_MASK) {
				if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
				    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
					recovering = true;
			}
			break;
		}
		spin_unlock_irqrestore(&ndlp->lock, iflags);

		/* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing
		 * the following lpfc_nlp_put is necessary after fabric node is
		 * recovered.
		 */
		clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
		if (recovering) {
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "8436 Devloss timeout marked on "
					 "DID x%x refcnt %d ndlp %p "
					 "flag x%lx port_state = x%x\n",
					 ndlp->nlp_DID, kref_read(&ndlp->kref),
					 ndlp, ndlp->nlp_flag,
					 vport->port_state);
			spin_lock_irqsave(&ndlp->lock, iflags);
			ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
		} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
			/* Fabric node fully recovered before this dev_loss_tmo
			 * queue work is processed.  Thus, ignore the
			 * dev_loss_tmo event.
			 */
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "8437 Devloss timeout ignored on "
					 "DID x%x refcnt %d ndlp %p "
					 "flag x%lx port_state = x%x\n",
					 ndlp->nlp_DID, kref_read(&ndlp->kref),
					 ndlp, ndlp->nlp_flag,
					 vport->port_state);
			return fcf_inuse;
		}

		lpfc_nlp_put(ndlp);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%lx x%x x%x refcnt %d\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi,
				 kref_read(&ndlp->kref));
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%lx x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}
	clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);

	/* If we are devloss, but we are in the process of rediscovering the
	 * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
	 */
	if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
	    ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
		return fcf_inuse;
	}

	if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}
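/* Note: the fcf_inuse return value matters only on SLI4 hosts: it reports
 * whether any remote node still referenced the in-use FCF when devloss
 * fired, and lpfc_work_list_done() feeds it to
 * lpfc_sli4_post_dev_loss_tmo_handler() for possible FCF unregistration.
 */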
static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	for (i = 0; i <= phba->max_vports; i++) {
		if ((!vports[i]) && (i == 0))
			vport = phba->pport;
		else
			vport = vports[i];
		if (!vport)
			break;

		if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
			if (!lpfc_issue_els_qfpa(vport))
				vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of FCF, when this
 * routine is invoked, it shall be guaranteed that none of the remote nodes
 * are still in use of FCF. When devloss timeout happens to the last remote
 * node using the FCF, if the FIP engine is neither in FCF table scan
 * process nor roundrobin failover process, the in-use FCF shall be
 * unregistered. If the FIP engine is in FCF discovery process, the devloss
 * timeout state shall be set for either the FCF table scan process or
 * roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
	    !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (test_and_set_bit(HBA_DEVLOSS_TMO,
					     &phba->hba_flag)) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (!test_bit(FCF_TS_INPROG, &phba->hba_flag) &&
		    !test_bit(FCF_RR_INPROG, &phba->hba_flag)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (test_bit(FCF_RR_INPROG, &phba->hba_flag))
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when there
 * are too many outstanding events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
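/* Note: fast-path events originate in interrupt context, hence the
 * GFP_ATOMIC allocation, and LPFC_MAX_EVT_COUNT caps outstanding events so
 * an event storm degrades to dropped events rather than memory exhaustion.
 */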
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread, when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
			fc_get_event_number(),
			evt_data_size,
			evt_data,
			LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;
	bool hba_pci_err;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			if (!hba_pci_err) {
				lpfc_els_retry_delay_handler(ndlp);
				free_evt = 0; /* evt is part of ndlp */
			}
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_RECOVER_PORT:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			if (!hba_pci_err) {
				lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
				free_evt = 0;
			}
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
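/* Note: free_evt encodes event ownership above: events embedded in another
 * object (ELS retry delay, dev-loss) or consumed by their handler (fastpath)
 * clear it, so only stand-alone allocations from lpfc_workq_post_event()
 * are kfree()'d here.
 */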
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;
	bool hba_pci_err;

	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	if (hba_pci_err)
		ha_copy = 0;

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT) {
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

		if (phba->fw_dump_cmpl) {
			complete(phba->fw_dump_cmpl);
			phba->fw_dump_cmpl = NULL;
		}
	}

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Handle VMID Events */
	if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
		if (phba->pport->work_port_events &
		    WORKER_CHECK_VMID_ISSUE_QFPA) {
			lpfc_check_vmid_qfpa_issue(phba);
			phba->pport->work_port_events &=
				~WORKER_CHECK_VMID_ISSUE_QFPA;
		}
		if (phba->pport->work_port_events &
		    WORKER_CHECK_INACTIVE_VMID) {
			lpfc_check_inactive_vmid(phba);
			phba->pport->work_port_events &=
			    ~WORKER_CHECK_INACTIVE_VMID;
		}
	}

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (test_bit(HBA_RRQ_ACTIVE, &phba->hba_flag))
			lpfc_handle_rrq_active(phba);
		if (test_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag))
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (test_bit(ASYNC_EVENT, &phba->hba_flag))
			lpfc_sli4_async_event_proc(phba);
		if (test_and_clear_bit(HBA_POST_RECEIVE_BUFFER,
				       &phba->hba_flag))
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (hba_pci_err)
				continue;
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if (pring && (status & HA_RXMASK ||
		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
		      test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Preserve legacy behavior. */
			if (!test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))
				set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			/* Driver could have abort request completed in queue
			 * when link goes down.  Allow for this transition.
			 */
			if (phba->link_state >= LPFC_LINK_DOWN ||
			    phba->link_flag & LS_MDS_LOOPBACK) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
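/* Note: lpfc_work_done() is the worker thread's single dispatch point: it
 * drains host-attention bits (ha_copy), per-vport timer events
 * (work_port_events), SLI4 async events, and the slow (ELS) ring before
 * finishing with the deferred work list.
 */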
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1  = arg1;
	evtp->evt_arg2  = arg2;
	evtp->evt       = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
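/* Note: GFP_ATOMIC is used because events may be posted from interrupt
 * paths. An illustrative use (hypothetical local variable names) posts a
 * synchronous event:
 *
 *	struct completion online_cmpl;
 *	int status;
 *
 *	init_completion(&online_cmpl);
 *	lpfc_workq_post_event(phba, &status, &online_cmpl, LPFC_EVT_ONLINE);
 *	wait_for_completion(&online_cmpl);
 *
 * matching how lpfc_work_list_done() writes the result through evt_arg1
 * and completes evt_arg2.
 */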
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     ((ndlp->nlp_DID == NameServer_DID) ||
		      (ndlp->nlp_DID == FDMI_DID) ||
		      (ndlp->nlp_DID == Fabric_Cntl_DID))))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;

		/* Notify transport of connectivity loss to trigger cleanup. */
		if (phba->nvmet_support &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_nvmet_invalidate_host(phba, ndlp);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	}
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Down:       state:x%x rtry:x%x flg:x%x",
			      vport->port_state, vport->fc_ns_retry,
			      vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	clear_bit(FC_DISC_DELAYED, &vport->fc_flag);
	del_timer_sync(&vport->delayed_disc_tmo);

	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    vport->port_type == LPFC_PHYSICAL_PORT &&
	    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
		/* Assume success on link up */
		phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
	}
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t          *mb;
	int i;
	int offline;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);
	offline = pci_channel_offline(phba->pcidev);

	/* Decrement the held ndlp if there is a deferred flogi acc */
	if (phba->defer_flogi_acc.flag) {
		if (phba->defer_flogi_acc.ndlp) {
			lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
			phba->defer_flogi_acc.ndlp = NULL;
		}
	}
	phba->defer_flogi_acc.flag = false;

	/* Clear external loopback plug detected flag */
	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		if (phba->sli4_hba.conf_trunk) {
			phba->trunk_link.link0.state = 0;
			phba->trunk_link.link1.state = 0;
			phba->trunk_link.link2.state = 0;
			phba->trunk_link.link3.state = 0;
			phba->trunk_link.phy_lnk_speed =
						LPFC_LINK_SPEED_UNKNOWN;
			phba->sli4_hba.link_state.logical_speed =
						LPFC_LINK_SPEED_UNKNOWN;
		}
		clear_bit(FC_LBIT, &phba->pport->fc_flag);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);

			vports[i]->fc_myDID = 0;

			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				if (phba->nvmet_support)
					lpfc_nvmet_update_targetport(phba);
				else
					lpfc_nvme_update_localport(vports[i]);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clean up any SLI3 firmware default rpi's */
	if (phba->sli_rev > LPFC_SLI_REV3 || offline)
		goto skip_unreg_did;

	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

 skip_unreg_did:
	/* Setup myDID for link up if we are in pt2pt mode */
	if (test_bit(FC_PT2PT, &phba->pport->fc_flag)) {
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		clear_bit(FC_PT2PT, &phba->pport->fc_flag);
		clear_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag);
		spin_lock_irq(shost->host_lock);
		phba->pport->rcv_flogi_cnt = 0;
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}
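/* Note: the teardown order in lpfc_linkdown() is deliberate: block the SCSI
 * stack first, flush per-vport ELS/RSCN state via lpfc_linkdown_port(), then
 * clean SLI3 firmware default RPIs and re-issue CONFIG_LINK for pt2pt so a
 * later link-up starts from a known state.
 */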
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);

		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if (test_bit(FC_UNLOADING, &vport->load_flag))
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Up:         top:x%x speed:x%x flg:x%x",
			      phba->fc_topology, phba->fc_linkspeed,
			      phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	if (phba->defer_flogi_acc.flag) {
		clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
		clear_bit(FC_RSCN_MODE, &vport->fc_flag);
		clear_bit(FC_NLP_MORE, &vport->fc_flag);
		clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
	} else {
		clear_bit(FC_PT2PT, &vport->fc_flag);
		clear_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
		clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
		clear_bit(FC_RSCN_MODE, &vport->fc_flag);
		clear_bit(FC_NLP_MORE, &vport->fc_flag);
		clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
	}
	set_bit(FC_NDISC_ACTIVE, &vport->fc_flag);

	spin_lock_irq(shost->host_lock);
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);
	lpfc_setup_fdmi_mask(vport);

	lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(phba->pport);

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clear the pport flogi counter in case the link down was
	 * absorbed without an ACQE. No lock here - in worker thread
	 * and discovery is synchronized.
	 */
	spin_lock_irq(shost->host_lock);
	phba->pport->rcv_flogi_cnt = 0;
	spin_unlock_irq(shost->host_lock);

	/* reinitialize initial HBA flag */
	clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
	clear_bit(HBA_RHBA_CMPL, &phba->hba_flag);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
}
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	LPFC_MBOXQ_t *sparam_mb;
	u16 status = pmb->u.mb.mbxStatus;
	int rc;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (status)
		goto out;

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    test_bit(FC_PUBLIC_LOOP, &vport->fc_flag) &&
	    !test_bit(FC_LBIT, &vport->fc_flag)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl.
	 */
	if (vport->port_state != LPFC_FLOGI) {
		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
		 * bb-credit recovery is in place.
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!sparam_mb)
				goto sparam_out;

			rc = lpfc_read_sparam(phba, sparam_mb, 0);
			if (rc) {
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}
			sparam_mb->vport = vport;
			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
						       MBOX_THD_UNLOCKED);
				goto sparam_out;
			}

			set_bit(HBA_DEFER_FLOGI, &phba->hba_flag);
		} else {
			lpfc_initial_flogi(vport);
		}
	} else {
		if (test_bit(FC_PT2PT, &vport->fc_flag))
			lpfc_disc_start(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
			 status, vport->port_state);

sparam_out:
	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
}
/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
				 vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if (!test_bit(FCF_RR_INPROG, &phba->hba_flag) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	spin_unlock_irq(&phba->hbalock);
	clear_bit(FCF_TS_INPROG, &phba->hba_flag);
	if (vport->port_state != LPFC_FLOGI) {
		set_bit(FCF_RR_INPROG, &phba->hba_flag);
		lpfc_issue_init_vfi(vport);
	}
	goto out;

fail_out:
	clear_bit(FCF_RR_INPROG, &phba->hba_flag);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}
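/* Note: the three match helpers above compare fabric name, switch name, and
 * MAC byte-by-byte through bf_get() accessors because struct fcf_record
 * packs these fields as discrete bit-fields rather than contiguous arrays,
 * so a plain memcmp() is not applicable.
 */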
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. The hbalock is asserted held in the code path calling this
 * routine.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the hbalock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	lockdep_assert_held(&phba->hbalock);

	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
				     bf_get(lpfc_fcf_record_fcf_index,
					    new_fcf_record),
				     new_fcf_record);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		spin_unlock_irq(&phba->hbalock);
		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
		clear_bit(FCF_RR_INPROG, &phba->hba_flag);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		spin_unlock_irq(&phba->hbalock);
		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
		if (phba->pport->port_state != LPFC_FLOGI &&
		    test_bit(FC_FABRIC, &phba->pport->fc_flag)) {
			set_bit(FCF_RR_INPROG, &phba->hba_flag);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
		clear_bit(FCF_RR_INPROG, &phba->hba_flag);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
		clear_bit(FCF_RR_INPROG, &phba->hba_flag);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}
}
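/* Note: lpfc_register_fcf() ends the FCF selection sequence: an eligible
 * record triggers a REG_FCFI mailbox whose completion
 * (lpfc_mbx_cmpl_reg_fcfi) marks FCF_REGISTERED/FCF_IN_USE and starts
 * fabric discovery, while every failure path clears the
 * FCF_TS_INPROG/FCF_RR_INPROG progress bits so discovery can be retried.
 */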

/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery else returns zero. If this FCF
 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
 * is used by boot bios and addr_mode will indicate the addressing mode to be
 * used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID;
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, driver connects to all
	 * FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record reports a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if the FCF supports the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
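
/*
 * Note on the vlan_bitmap indexing used above: the bitmap is an array
 * of 512 bytes covering 4096 VLAN ids, eight ids per byte.  For
 * example, vlan_tag 100 maps to byte 100 / 8 = 12 and bit 100 % 8 = 4,
 * so a connection entry with that tag matches only when bit 4 of
 * new_fcf_record->vlan_bitmap[12] is set.
 */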

/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else return 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
		clear_bit(FCF_RR_INPROG, &phba->hba_flag);
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on the FCF record to
 * use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, a 16-bit random number returned from get_random_u16() is
 * taken as the random number generated.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is to keep the previously
 * chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = get_random_u16();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}

/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status, if_type;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	lpfc_sli_pcimem_bcopy(shdr, shdr,
			      sizeof(union lpfc_sli4_cfg_shdr));
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
		    if_type == LPFC_SLI_INTF_IF_TYPE_2)
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
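
/*
 * Endianness note: lpfc_sli_pcimem_bcopy() is called above with the
 * same source and destination; the intent (assuming the helper's usual
 * word-swapping copy semantics) is to convert the little-endian DMA'd
 * data in place for the host CPU, while the two words past the
 * vlan_bitmap boundary are swapped explicitly with le32_to_cpu().
 */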

/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFCF_SOL       : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}

/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs a matching test of a new FCF record against an
 * existing FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID,
 * vlan id will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 **/
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}

/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin fcf failover to the next fcf index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 *
 * Return: 0 for continue retrying flogi on currently registered fcf;
 *         1 for stop flogi on currently registered fcf;
 **/
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		if (test_bit(HBA_DEVLOSS_TMO, &phba->hba_flag)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		clear_bit(FCF_RR_INPROG, &phba->hba_flag);
		/* Allow action to new fcf asynchronous event */
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;

		if (!phba->fcf.fcf_redisc_attempted) {
			lpfc_unregister_fcf(phba);

			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (!rc) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"3195 Rediscover FCF table\n");
				phba->fcf.fcf_redisc_attempted = 1;
				lpfc_sli4_clear_fcf_rr_bmask(phba);
			} else {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"3196 Rediscover FCF table "
						"failed. Status:x%x\n", rc);
			}
		} else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					"3197 Already rediscover FCF table "
					"attempted. No more retry\n");
		}
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}

/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 *
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list then it is removed from the list, and the flag
 * is cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so the roundrobin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
static void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to a new FCF record.
 *
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 *
 * Returns:
 * 0=success 1=failure
 **/
static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
				      uint16_t fcf_index,
				      struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3059 adding idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_record->fip_priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
						 new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
			       sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
						 new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
		    fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
					 &phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					 fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
			   next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;
	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
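
/*
 * Illustrative example of the insertion rules above (lower priority
 * value is preferred): with the fcf_pri_list holding records of
 * priority 3, 3, 7, adding an index with fip_priority 3 places it in
 * the head group, sets its bit in fcf_rr_bmask and bumps
 * eligible_fcf_cnt; adding one with fip_priority 1 instead zeroes
 * fcf_rr_bmask, sets only the new bit and resets eligible_fcf_cnt to 1,
 * so roundrobin failover only walks records at the best priority level
 * currently present.
 */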

/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kick-starts
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 **/
static void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	bool select_new_fcf;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for roundrobin FCF failover.
	 */
	if (!rc) {
		lpfc_sli4_fcf_pri_list_del(phba,
					   bf_get(lpfc_fcf_record_fcf_index,
						  new_fcf_record));
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF (x%x) failed connection "
				"list check: (x%x/x%x/%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_sol,
				       new_fcf_record));
		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
					       new_fcf_record,
					       LPFC_FCOE_IGNORE_VID)) {
			if (bf_get(lpfc_fcf_record_fcf_index,
				   new_fcf_record) !=
			    phba->fcf.current_rec.fcf_indx) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2862 FCF (x%x) matches property "
						"of in-use FCF (x%x)\n",
						bf_get(lpfc_fcf_record_fcf_index,
						       new_fcf_record),
						phba->fcf.current_rec.fcf_indx);
				goto read_next_fcf;
			}
			/*
			 * In case the current in-use FCF record becomes
			 * invalid/unavailable during FCF discovery that
			 * was not triggered by fast FCF failover process,
			 * treat it as fast FCF failover.
			 */
			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"2835 Invalid in-use FCF "
						"(x%x), enter FCF failover "
						"table scan.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_mbox_cmd_free(phba, mboxq);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
		}
		goto read_next_fcf;
	} else {
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
						new_fcf_record);
		if (rc)
			goto read_next_fcf;
	}

	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
					       new_fcf_record, vlan_id)) {
			if (bf_get(lpfc_fcf_record_fcf_index,
				   new_fcf_record) ==
			    phba->fcf.current_rec.fcf_indx) {
				phba->fcf.fcf_flag |= FCF_AVAILABLE;
				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
					/* Stop FCF redisc wait timer */
					__lpfc_sli4_stop_fcf_redisc_wait_timer(
									phba);
				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
					/* Fast failover, mark completed */
					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2836 New FCF matches in-use "
						"FCF (x%x), port_state:x%x, "
						"fc_flag:x%lx\n",
						phba->fcf.current_rec.fcf_indx,
						phba->pport->port_state,
						phba->pport->fc_flag);
				goto out;
			} else
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2863 New FCF (x%x) matches "
					"property of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2837 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
						 addr_mode, vlan_id,
						 BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2838 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
						 addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2839 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
			}
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2840 Update initial FCF candidate "
				"with FCF (x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record));
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF found: "
					       "(x%x/x%x)\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				if (test_bit(HBA_DEVLOSS_TMO,
					     &phba->hba_flag)) {
					clear_bit(FCF_TS_INPROG,
						  &phba->hba_flag);
					/* Unregister in-use FCF and rescan */
					lpfc_printf_log(phba, KERN_INFO,
							LOG_FIP,
							"2864 On devloss tmo "
							"unreg in-use FCF and "
							"rescan FCF table\n");
					lpfc_unregister_fcf_rescan(phba);
					return;
				}
				/*
				 * Let next new FCF event trigger fast failover
				 */
				clear_bit(FCF_TS_INPROG, &phba->hba_flag);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2842 Replace in-use FCF (x%x) "
					"with failover FCF (x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					phba->fcf.failover_rec.fcf_indx);
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/*
			 * Mark the fast FCF failover rediscovery completed
			 * and the start of the first round of the roundrobin
			 * failover.
			 */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;

			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
			    phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * In case the current in-use FCF record no
				 * longer existed during FCF discovery that
				 * was not triggered by fast FCF failover
				 * process, treat it as fast FCF failover.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2841 In-use FCF record (x%x) "
						"not reported, entering fast "
						"FCF failover mode scanning.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);
}

/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure roundrobin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * fails through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 **/
static void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index, fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	int rc;

	/* If link state is not up, stop the roundrobin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		spin_unlock_irq(&phba->hbalock);
		clear_bit(FCF_RR_INPROG, &phba->hba_flag);
		goto out;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record. "
				"hba_flg x%lx fcf_flg x%x\n", phba->hba_flag,
				phba->fcf.fcf_flag);
		lpfc_unregister_fcf_rescan(phba);
		goto out;
	}

	/* Get the needed parameters from FCF record */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"from roundrobin bmask\n", fcf_index);
		/* Clear roundrobin bmask bit for ineligible FCF */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		/* Perform next round of roundrobin FCF failover */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
		if (rc)
			goto out;
		goto error_out;
	}

	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2760 Perform FLOGI roundrobin FCF failover: "
				"FCF (x%x) back to FCF (x%x)\n",
				phba->fcf.current_rec.fcf_indx, fcf_index);
		/* Wait 500 ms before retrying FLOGI to current FCF */
		msleep(500);
		lpfc_issue_init_vfi(phba->pport);
		goto out;
	}

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
			phba->fcf.failover_rec.fcf_indx, fcf_index);
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 Perform FLOGI roundrobin FCF failover: FCF "
			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);

error_out:
	lpfc_register_fcf(phba);
out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 **/
static void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vfi mailbox command.
 **/
static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI not supported on interface type 0, just do the flogi
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2891 Init VFI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI and
 * VPI for the physical port.
 **/
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT, "2892 Failed to allocate "
				 "init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}

/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 **/
static void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 **/
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc, vpi;

	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
		vpi = lpfc_alloc_vpi(vport->phba);
		if (!vpi) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "3303 Failed to obtain vport vpi\n");
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			return;
		}
		vport->vpi = vpi;
	}

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT, "2607 Failed to allocate "
				 "init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}

/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 **/
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (test_bit(FC_VPORT_NEEDS_INIT_VPI,
				     &vports[i]->fc_flag)) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_TRACE_EVENT,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* If the VFI is already registered, there is nothing else to do
	 * Unless this was a VFI update and we are in PT2PT mode, then
	 * we should drop through to set the port state to ready.
	 */
	if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
		      test_bit(FC_PT2PT, &vport->fc_flag)))
			goto out_free_mem;

	/* The VPI is implicitly registered when the VFI is registered */
	set_bit(FC_VFI_REGISTERED, &vport->fc_flag);
	clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	spin_unlock_irq(shost->host_lock);

	/* In case SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "3313 cmpl reg vfi  port_state:%x fc_flag:%lx "
			 "myDid:%x alpacnt:%d LinkState:%x topology:%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_myDID,
			 vport->phba->alpa_map[0],
			 phba->link_state, phba->fc_topology);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or for NPort pt2pt,
		 * just start discovery and we are done.
		 */
		if (test_bit(FC_PT2PT, &vport->fc_flag) ||
		    (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
		     !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))) {

			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			if (test_bit(FC_PT2PT, &vport->fc_flag))
				vport->port_state = LPFC_VPORT_READY;
			else
				lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
}

static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = pmb->ctx_buf;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct serv_parm *sp = &vport->fc_sparam;
	uint32_t ed_tov;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *)&vport->fc_sparam, (uint8_t *)mp->virt,
	       sizeof(struct serv_parm));

	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		ed_tov = (ed_tov + 999999) / 1000000;

	phba->fc_edtov = ed_tov;
	phba->fc_ratov = (2 * ed_tov) / 1000;
	if (phba->fc_ratov < FF_DEF_RATOV) {
		/* RA_TOV should be at least 10sec for initial flogi */
		phba->fc_ratov = FF_DEF_RATOV;
	}

	lpfc_update_vport_wwn(vport);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Check if sending the FLOGI is being deferred to after we get
	 * up to date CSPs from MBX_READ_SPARAM.
	 */
	if (test_bit(HBA_DEFER_FLOGI, &phba->hba_flag)) {
		lpfc_initial_flogi(vport);
		clear_bit(HBA_DEFER_FLOGI, &phba->hba_flag);
	}
	return;

out:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
	lpfc_issue_clear_la(phba, vport);
}
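
/*
 * Worked example for the timeout conversion above: a fabric reporting
 * E_D_TOV with nanosecond resolution as 2,000,000,000 (2 seconds)
 * yields ed_tov = (2000000000 + 999999) / 1000000 = 2000 ms, and
 * R_A_TOV = (2 * 2000) / 1000 = 4 seconds, which is then raised to
 * FF_DEF_RATOV because the initial FLOGI needs the larger default.
 */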

static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	int i;
	int rc;
	struct fcf_record *fcf_record;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);

	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
		case LPFC_LINK_SPEED_1GHZ:
		case LPFC_LINK_SPEED_2GHZ:
		case LPFC_LINK_SPEED_4GHZ:
		case LPFC_LINK_SPEED_8GHZ:
		case LPFC_LINK_SPEED_10GHZ:
		case LPFC_LINK_SPEED_16GHZ:
		case LPFC_LINK_SPEED_32GHZ:
		case LPFC_LINK_SPEED_64GHZ:
		case LPFC_LINK_SPEED_128GHZ:
		case LPFC_LINK_SPEED_256GHZ:
			break;
		default:
			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
			break;
		}
	}

	if (phba->fc_topology &&
	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Toplogy changed was 0x%x is 0x%x\n",
				phba->fc_topology,
				bf_get(lpfc_mbx_read_top_topology, la));
		phba->fc_topology_changed = 1;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1309 Link Up Event npiv not supported in loop "
					"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la))
			set_bit(FC_LBIT, &vport->fc_flag);

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		set_bit(FC_LBIT, &vport->fc_flag);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	lpfc_linkup(phba);
	sparam_mbox = NULL;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
		goto out;
	}

	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					     GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2554 Could not allocate memory for "
						"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2013 Could not manually add FCF "
						"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
			return;
		/* This is the initial FCF discovery scan */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irqsave(&phba->hbalock, iflags);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	/* Prepare for LINK up registrations */
	memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
	scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
		  init_utsname()->nodename);
	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
}

static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}

/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI4 only.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_mbx_read_top *la;
	struct lpfc_sli_ring *pring;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = pmb->ctx_buf;
	uint8_t attn_type;

	/* Unblock ELS traffic */
	pring = lpfc_phba_elsring(phba);
	if (pring)
		pring->flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	if (bf_get(lpfc_mbx_read_top_pb, la))
		set_bit(FC_BYPASSED_MODE, &vport->fc_flag);
	else
		clear_bit(FC_BYPASSED_MODE, &vport->fc_flag);

	if (phba->fc_eventTag <= la->eventTag) {
		phba->fc_stat.LinkMultiEvent++;
		if (attn_type == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	phba->link_events++;
	if (attn_type == LPFC_ATT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_fa, la));
		}
		lpfc_mbx_process_link_up(phba, la);

		if (phba->cmf_active_mode != LPFC_CFG_OFF)
			lpfc_cmf_signal_init(phba);

		if (phba->lmt & LMT_64Gb)
			lpfc_read_lds_params(phba);

	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
		   attn_type == LPFC_ATT_UNEXP_WWPN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1308 Link Down Event in loop back mode "
					"x%x received "
					"Data: x%x x%x x%lx\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag);
		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1313 Link Down Unexpected FA WWPN Event x%x "
					"received Data: x%x x%x x%lx x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag,
					bf_get(lpfc_mbx_read_top_fa, la));
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1305 Link Down Event x%x received "
					"Data: x%x x%x x%lx x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag,
					bf_get(lpfc_mbx_read_top_fa, la));
		lpfc_mbx_issue_link_down(phba);
	}

	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    bf_get(lpfc_mbx_read_top_fa, la))
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = pmb->ctx_buf;
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;

	/* The driver calls the state machine with the pmb pointer
	 * but wants to make sure a stale ctx_buf isn't acted on.
	 * The ctx_buf is restored later and cleaned up.
	 */
	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
			 "0002 rpi:%x DID:%x flg:%lx %d x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref), ndlp);
	clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);

	if (test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);

		/*
		 * We cannot leave the RPI registered because
		 * if we go thru discovery again for this ndlp
		 * a subsequent REG_RPI will fail.
		 */
		set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* Call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	pmb->ctx_buf = mp;
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
}
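
/* Completion handler for the UNREG_VPI mailbox command. Clears the
 * VPI registration state for the vport and, if firmware reports the
 * VPI busy, schedules an HBA reset.
 */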
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
			lpfc_workq_post_event(phba, NULL, NULL,
					      LPFC_EVT_RESET_HBA);
	}

	set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if (test_bit(FC_UNLOADING, &vport->load_flag) && vport != phba->pport)
		scsi_host_put(shost);
}
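
/* Issue an UNREG_VPI mailbox command for the vport. */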
void
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
	}
}
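
/* Completion handler for the REG_VPI mailbox command. On success the
 * VPI is marked registered and NPR nodes are re-PLOGIed; on failure
 * the vport is placed in FC_VPORT_FAILED state.
 */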
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		clear_bit(FC_FABRIC, &vport->fc_flag);
		clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
		vport->fc_myDID = 0;

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
			if (phba->nvmet_support)
				lpfc_nvmet_update_targetport(phba);
			else
				lpfc_nvme_update_localport(vport);
		}
		goto out;
	}

	clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (atomic_read(&vport->fc_npr_cnt))
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	do {
		/* While loop iteration forces a free dma buffer from
		 * the previous loop because the mbox is reused and
		 * the dump routine is a single-use construct.
		 */
		if (pmb->ctx_buf) {
			mp = pmb->ctx_buf;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			pmb->ctx_buf = NULL;
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
						       LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0544 lpfc_create_static_vport failed to"
					" issue dump mailbox command ret 0x%x "
					"status 0x%x\n",
					mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = pmb->ctx_buf;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
			    sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
					      vport_buff + offset,
					      byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		 offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
	    ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
		!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0545 lpfc_create_static_vport bad"
				" information header 0x%x 0x%x\n",
				le32_to_cpu(vport_info->signature),
				le32_to_cpu(vport_info->rev) &
				VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0546 lpfc_create_static_vport failed to"
					" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (mbx_wait_rc != MBX_TIMEOUT)
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;

	pmb->ctx_ndlp = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);

			/* Decrement the reference count to ndlp after the
			 * references to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);

		/* Decrement the reference count to ndlp after the references
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* When the physical port receives a LOGO, do not start
		 * vport discovery.
		 */
		if (!test_and_clear_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag))
			lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp are done.
	 */
	lpfc_nlp_put(ndlp);
}
/*
 * This routine will issue a GID_FT for each FC4 Type supported
 * by the driver. ALL GID_FTs must complete before discovery is started.
 */
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
			/* Cannot issue NameServer FCP Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0604 %s FC TYPE %x %s\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_FCP,
					 "Finishing discovery.");
			return 0;
		}
		vport->gidft_inp++;
	}

	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
			/* Cannot issue NameServer NVME Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0605 %s FC_TYPE %x %s %d\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_NVME,
					 "Finishing discovery: gidftinp ",
					 vport->gidft_inp);
			if (vport->gidft_inp == 0)
				return 0;
		} else {
			vport->gidft_inp++;
		}
	}
	return vport->gidft_inp;
}
/**
 * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
 * @vport: The virtual port for which this call is being executed.
 *
 * This routine will issue a GID_PT to get a list of all N_Ports
 *
 * Return value :
 *   0 - Failure to issue a GID_PT
 *   1 - GID_PT issued
 **/
int
lpfc_issue_gidpt(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
		/* Cannot issue NameServer FCP Query, so finish up
		 * discovery
		 */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0606 %s Port TYPE %x %s\n",
				 "Failed to issue GID_PT to ",
				 GID_PT_N_PORT,
				 "Finishing discovery.");
		return 0;
	}
	vport->gidft_inp++;
	return 1;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
	struct lpfc_vport *vport = pmb->vport;
	int rc;

	pmb->ctx_ndlp = NULL;
	vport->gidft_inp = 0;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);

out:
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

		/* If the node is not registered with the scsi or nvme
		 * transport, remove the fabric node.  The failed reg_login
		 * is terminal and forces the removal of the last node
		 * reference.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
			clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
			lpfc_nlp_put(ndlp);
		}

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
			 "0003 rpi:%x DID:%x flg:%lx %d x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref), ndlp);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
				    FC_TYPE_NVME);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, 0);

		/* Link was bounced or a Fabric LOGO occurred.  Start EDC
		 * with initial FW values provided the congestion mode is
		 * not off.  Note that signals may or may not be supported
		 * by the adapter, but FPIN is provided by default when one
		 * or both signals are not supported.
		 */
		if (phba->cmf_active_mode != LPFC_CFG_OFF) {
			phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
			phba->cgn_reg_signal = phba->cgn_init_reg_signal;
			rc = lpfc_issue_els_edc(vport, 0);
			lpfc_printf_log(phba, KERN_INFO,
					LOG_INIT | LOG_ELS | LOG_DISCOVERY,
					"4220 Issue EDC status x%x Data x%x\n",
					rc, phba->cgn_init_reg_signal);
		} else if (phba->lmt & LMT_64Gb) {
			/* may send link fault capability descriptor */
			lpfc_issue_els_edc(vport, 0);
		} else {
			lpfc_issue_els_rdf(vport, 0);
		}
	}

	vport->fc_ns_retry = 0;
	if (lpfc_issue_gidft(vport) == 0)
		goto out;

	/*
	 * At this point in time we may need to wait for multiple
	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
	 *
	 * decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
 * This routine handles processing a Fabric Controller REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;

	pmb->ctx_ndlp = NULL;
	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0933 %s: Register FC login error: 0x%x\n",
				 __func__, mb->mbxStatus);
		goto out;
	}

	lpfc_check_nlp_post_devloss(vport, ndlp);

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n",
			 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
			 ndlp->nlp_state);

	set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
	clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

out:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp are done.
	 */
	lpfc_nlp_put(ndlp);
}
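
/* Register an ndlp with the FC transport as a remote port and set up
 * the rport's roles, frame size, and supported classes from the node.
 */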
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba *phba = vport->phba;
	unsigned long flags;

	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport add:       did:x%x flg:x%lx type x%x",
			      ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (test_bit(FC_UNLOADING, &vport->load_flag))
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* Successful port add.  Complete initializing node data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);
	if (!rdata->pnode) {
		dev_warn(&phba->pcidev->dev,
			 "Warning - node ref failed. Unreg rport\n");
		fc_remote_port_delete(rport);
		ndlp->rport = NULL;
		return;
	}

	spin_lock_irqsave(&ndlp->lock, flags);
	ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
	spin_unlock_irqrestore(&ndlp->lock, flags);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3183 %s rport x%px DID x%x, role x%x refcnt %d\n",
			 __func__, rport, rport->port_id, rport->roles,
			 kref_read(&ndlp->kref));

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
}
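
/* Remove the FC transport remote port associated with an ndlp. */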
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_vport *vport = ndlp->vport;

	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport delete:    did:x%x flg:x%lx type x%x",
			      ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3184 rport unregister x%06x, rport x%px "
			 "xptflg x%x refcnt %d\n",
			 ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
			 kref_read(&ndlp->kref));

	fc_remote_port_delete(rport);
	ndlp->rport = NULL;
}
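
/* Adjust the per-state node counters on the vport by @count
 * (+1 when a node enters a state, -1 when it leaves).
 */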
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		atomic_add(count, &vport->fc_unused_cnt);
		break;
	case NLP_STE_PLOGI_ISSUE:
		atomic_add(count, &vport->fc_plogi_cnt);
		break;
	case NLP_STE_ADISC_ISSUE:
		atomic_add(count, &vport->fc_adisc_cnt);
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		atomic_add(count, &vport->fc_reglogin_cnt);
		break;
	case NLP_STE_PRLI_ISSUE:
		atomic_add(count, &vport->fc_prli_cnt);
		break;
	case NLP_STE_UNMAPPED_NODE:
		atomic_add(count, &vport->fc_unmap_cnt);
		break;
	case NLP_STE_MAPPED_NODE:
		atomic_add(count, &vport->fc_map_cnt);
		break;
	case NLP_STE_NPR_NODE:
		if (!atomic_read(&vport->fc_npr_cnt) && count == -1)
			atomic_set(&vport->fc_npr_cnt, 0);
		else
			atomic_add(count, &vport->fc_npr_cnt);
		break;
	}
}
/* Register a node with backend if not already done */
static void
lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	lpfc_check_nlp_post_devloss(vport, ndlp);

	spin_lock_irqsave(&ndlp->lock, iflags);
	if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
		/* Already registered with backend, trigger rescan */
		spin_unlock_irqrestore(&ndlp->lock, iflags);

		if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
		    ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
			lpfc_nvme_rescan_port(vport, ndlp);
		}
		return;
	}

	ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	if (lpfc_valid_xpt_node(ndlp)) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}

	/* We are done if we do not have any NVME remote node */
	if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
		return;

	/* Notify the NVME transport of this new rport. */
	if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
	    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
		if (vport->phba->nvmet_support == 0) {
			/* Register this rport with the transport.
			 * Only NVME Target Rports are registered with
			 * the transport.
			 */
			if (ndlp->nlp_type & NLP_NVME_TARGET) {
				vport->phba->nport_event_cnt++;
				lpfc_nvme_register_port(vport, ndlp);
			}
		} else {
			/* Just take an NDLP ref count since the
			 * target does not register rports.
			 */
			lpfc_nlp_get(ndlp);
		}
	}
}
/* Unregister a node with backend if not already done */
static void
lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	spin_lock_irqsave(&ndlp->lock, iflags);
	if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
				 "0999 %s Not regd: ndlp x%px rport x%px DID "
				 "x%x FLG x%lx XPT x%x\n",
				 __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
				 ndlp->nlp_flag, ndlp->fc4_xpt_flags);
		return;
	}

	ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	if (ndlp->rport &&
	    ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	} else if (!ndlp->rport) {
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
				 "1999 %s NDLP in devloss x%px DID x%x FLG x%lx"
				 " XPT x%x refcnt %u\n",
				 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->fc4_xpt_flags,
				 kref_read(&ndlp->kref));
	}

	if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
		vport->phba->nport_event_cnt++;
		if (vport->phba->nvmet_support == 0) {
			/* Start devloss if target. */
			if (ndlp->nlp_type & NLP_NVME_TARGET)
				lpfc_nvme_unregister_port(vport, ndlp);
		} else {
			/* NVMET has no upcall. */
			lpfc_nlp_put(ndlp);
		}
	}
}
/*
 * Adisc state change handling
 */
static void
lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			int new_state)
{
	switch (new_state) {
	/*
	 * Any state to ADISC_ISSUE
	 * Do nothing, adisc cmpl handling will trigger state changes
	 */
	case NLP_STE_ADISC_ISSUE:
		break;

	/*
	 * ADISC_ISSUE to mapped states
	 * Trigger a registration with backend, it will be nop if
	 * already registered
	 */
	case NLP_STE_UNMAPPED_NODE:
		ndlp->nlp_type |= NLP_FC_NODE;
		fallthrough;
	case NLP_STE_MAPPED_NODE:
		clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
		lpfc_nlp_reg_node(vport, ndlp);
		break;

	/*
	 * ADISC_ISSUE to non-mapped states
	 * We are moving from ADISC_ISSUE to a non-mapped state because
	 * ADISC failed, we would have skipped unregistering with
	 * backend, attempt it now
	 */
	case NLP_STE_NPR_NODE:
		clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
		fallthrough;
	default:
		lpfc_nlp_unreg_node(vport, ndlp);
		break;
	}
}
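
/* Perform backend (FC/NVME transport) registration bookkeeping for a
 * node state transition that has already been recorded.
 */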
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	/* Trap ADISC changes here */
	if (new_state == NLP_STE_ADISC_ISSUE ||
	    old_state == NLP_STE_ADISC_ISSUE) {
		lpfc_handle_adisc_state(vport, ndlp, new_state);
		return;
	}

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
	if (new_state == NLP_STE_NPR_NODE)
		clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);

	/* Reg/Unreg for FCP and NVME Transport interface */
	if ((old_state == NLP_STE_MAPPED_NODE ||
	     old_state == NLP_STE_UNMAPPED_NODE)) {
		/* For nodes marked for ADISC, Handle unreg in ADISC cmpl
		 * if linkup. In linkdown do unreg_node
		 */
		if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag) ||
		    !lpfc_is_link_up(vport->phba))
			lpfc_nlp_unreg_node(vport, ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE)
		lpfc_nlp_reg_node(vport, ndlp);

	/*
	 * If the node just added to Mapped list was an FCP target,
	 * but the remote port registration failed or assigned a target
	 * id outside the presentable range - move the node to the
	 * Unmapped List.
	 */
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		set_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
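
/* Return a printable name for a node state into @buffer. */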
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_LOGO_ISSUE] = "LOGO",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strscpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
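
/* Transition an ndlp to a new discovery state, updating the vport
 * node counters and triggering transport reg/unreg side effects.
 */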
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	int old_state = ndlp->nlp_state;
	bool node_dropped = test_bit(NLP_DROPPED, &ndlp->nlp_flag);
	char name1[16], name2[16];
	unsigned long iflags;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
			      "node statechg    did:x%x old:%d ste:%d",
			      ndlp->nlp_DID, old_state, state);

	if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
	    state != NLP_STE_UNUSED_NODE) {
		clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
		lpfc_nlp_get(ndlp);
	}

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		clear_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag);
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
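
/* Add an ndlp to the vport's fc_nodes list if not already queued. */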
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
	}
}
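
/* Remove an ndlp from the vport's fc_nodes list and drop its
 * contribution to the node state counters.
 */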
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
			       NLP_STE_UNUSED_NODE);
}
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp because the
 * life-span of @ndlp might go beyond the existence of @vport: the final
 * release of ndlp is determined by its reference count, and operations on
 * @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
	INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);

	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	ndlp->nlp_fc4_type = NLP_FC4_NONE;
	kref_init(&ndlp->kref);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
	ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used when lpfc wants to remove the "last" lpfc_nlp_put() to
	 * release the ndlp from the vport when conditions are correct.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_cleanup_vports_rrqs(vport, ndlp);
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* NLP_DROPPED means another thread already removed the initial
	 * reference from lpfc_nlp_init. If set, don't drop it again and
	 * introduce an imbalance.
	 */
	if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
		lpfc_nlp_put(ndlp);
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
				      "set disc timer:  tmo:x%x state:x%x flg:x%x",
				      tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
	set_bit(FC_DISC_TMO, &vport->fc_flag);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo,
			 atomic_read(&vport->fc_plogi_cnt),
			 atomic_read(&vport->fc_adisc_cnt));
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "can disc timer:  state:x%x rtry:x%x flg:x%x",
			      vport->port_state, vport->fc_ns_retry,
			      vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (test_bit(FC_DISC_TMO, &vport->fc_flag) ||
	    timer_pending(&vport->fc_disctmo)) {
		clear_bit(FC_DISC_TMO, &vport->fc_flag);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%lx x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 atomic_read(&vport->fc_plogi_cnt),
			 atomic_read(&vport->fc_adisc_cnt));

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
static int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	u8 ulp_command;
	u16 ulp_context;
	u32 remote_id;

	if (iocb->vport != vport)
		return 0;

	ulp_command = get_job_cmnd(phba, iocb);
	ulp_context = get_job_ulpcontext(phba, iocb);
	remote_id = get_job_els_rsp64_did(phba, iocb);

	if (pring->ringno == LPFC_ELS_RING) {
		switch (ulp_command) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->ndlp == ndlp)
				return 1;
			fallthrough;
		case CMD_ELS_REQUEST64_CR:
			if (remote_id == ndlp->nlp_DID)
				return 1;
			fallthrough;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->ndlp == ndlp)
				return 1;
		}
	} else if (pring->ringno == LPFC_FCP_RING) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))
			return 0;

		if (ulp_context == ndlp->nlp_rpi)
			return 1;
	}
	return 0;
}
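
/* Move all txq iocbs on @pring that match @ndlp onto @dequeue_list.
 * Caller must hold the appropriate ring lock.
 */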
static void
__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
		struct list_head *dequeue_list)
{
	struct lpfc_iocbq *iocb, *next_iocb;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			/* match, dequeue */
			list_move_tail(&iocb->list, dequeue_list);
	}
}
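
/* SLI-3: collect matching iocbs from every ring's txq. */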
static void
lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t i;

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++)
		__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
					   dequeue_list);
	spin_unlock_irq(&phba->hbalock);
}
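
/* SLI-4: collect matching iocbs from every work queue's ring. */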
static void
lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock(&pring->ring_lock);
		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
		spin_unlock(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
		if (phba->sli_rev != LPFC_SLI_REV4)
			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
		else
			lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the unreg_rpi.
 **/
static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (!ndlp)
		return;
	lpfc_issue_els_logo(vport, ndlp, 0);

	/* Check to see if there are any deferred events to process */
	if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) &&
	    ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "1434 UNREG cmpl deferred logo x%x "
				 "on NPort x%x Data: x%x x%px\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_defer_did, ndlp);

		clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	} else {
		clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
	}

	/* The node has an outstanding reference for the unreg. Now
	 * that the LOGO action and cleanup are finished, release
	 * resources.
	 */
	lpfc_nlp_put(ndlp);
	mempool_free(pmb, phba->mbox_mem_pool);
}
/*
 * Sets the mailbox completion handler to be used for the
 * unreg_rpi command. The handler varies based on the state of
 * the port and what will be happening to the rpi next.
 */
static void
lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
	struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
{
	/* Driver always gets a reference on the mailbox job
	 * in support of async jobs.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp)
		return;

	if (test_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag)) {
		mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
	} else if (phba->sli_rev == LPFC_SLI_REV4 &&
		   !test_bit(FC_UNLOADING, &vport->load_flag) &&
		   (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
		    LPFC_SLI_INTF_IF_TYPE_2) &&
		   (kref_read(&ndlp->kref) > 0)) {
		mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
	} else {
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc, acc_plogi = 1;
	uint16_t rpi;

	if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) ||
	    test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) {
		if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "3366 RPI x%x needs to be "
					 "unregistered nlp_flag x%lx "
					 "did x%x\n",
					 ndlp->nlp_rpi, ndlp->nlp_flag,
					 ndlp->nlp_DID);

		/* If there is already an UNREG in progress for this ndlp,
		 * no need to queue up another one.
		 */
		if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) {
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "1436 unreg_rpi SKIP UNREG x%x on "
					 "NPort x%x deferred x%x flg x%lx "
					 "Data: x%px\n",
					 ndlp->nlp_rpi, ndlp->nlp_DID,
					 ndlp->nlp_defer_did,
					 ndlp->nlp_flag, ndlp);
			goto out;
		}

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
			if (!mbox->ctx_ndlp) {
				mempool_free(mbox, phba->mbox_mem_pool);
				return 1;
			}

			/* Accept PLOGIs after unreg_rpi_cmpl. */
			if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
				acc_plogi = 0;
			if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
				set_bit(NLP_UNREG_INP, &ndlp->nlp_flag);

			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "1433 unreg_rpi UNREG x%x on "
					 "NPort x%x deferred flg x%lx "
					 "Data:x%px\n",
					 ndlp->nlp_rpi, ndlp->nlp_DID,
					 ndlp->nlp_flag, ndlp);

			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
				mempool_free(mbox, phba->mbox_mem_pool);
				acc_plogi = 1;
				lpfc_nlp_put(ndlp);
			}
		} else {
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "1444 Failed to allocate mempool "
					 "unreg_rpi UNREG x%x, "
					 "DID x%x, flag x%lx, "
					 "ndlp x%px\n",
					 ndlp->nlp_rpi, ndlp->nlp_DID,
					 ndlp->nlp_flag, ndlp);

			/* Because mempool_alloc failed, we
			 * will issue a LOGO here and keep the rpi alive if
			 * not unloading.
			 */
			if (!test_bit(FC_UNLOADING, &vport->load_flag)) {
				clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
				lpfc_issue_els_logo(vport, ndlp, 0);
				ndlp->nlp_prev_state = ndlp->nlp_state;
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_NPR_NODE);
			}

			return 1;
		}
		lpfc_no_rpi(phba, ndlp);
out:
		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
		if (acc_plogi)
			clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
		return 1;
	}
	clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
	return 0;
}
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	int i;
	unsigned long iflags;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2884 Vport array allocation failed \n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
				/* The mempool_alloc might sleep */
				spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
						       iflags);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irqsave(&vports[i]->fc_nodes_list_lock,
						  iflags);
			}
		}
		spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
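
/* Unregister all RPIs held by this vport. */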
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->ctx_ndlp = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "1836 Could not issue "
					 "unreg_login(all_rpis) status %d\n",
					 rc);
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	/* Unreg DID is an SLI3 operation. */
	if (phba->sli_rev > LPFC_SLI_REV3)
		return;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->ctx_ndlp = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%lx x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	lpfc_dequeue_node(vport, ndlp);

	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == mb->ctx_ndlp)) {
			mb->ctx_ndlp = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
		    (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
		    (ndlp != mb->ctx_ndlp))
			continue;

		mb->ctx_ndlp = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == mb->ctx_ndlp)) {
			list_del(&mb->list);
			lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);

			/* Don't invoke lpfc_nlp_put. The driver is in
			 * lpfc_nlp_release context.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	list_del_init(&ndlp->recovery_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);

	return 0;
}
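
/* Return 1 if @did addresses @ndlp, accounting for private-loop
 * devices whose DIDs carry zero domain/area fields.
 */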
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0))
		return 0;

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			/* This code is supposed to match the ID
			 * for a private loop device that is
			 * connected to fl_port. But we need to
			 * check that the port did not just go
			 * from pt2pt to fabric or we could end
			 * up matching ndlp->nlp_DID 000001 to
			 * fabric DID 0x20101
			 */
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id &&
				    vport->phba->fc_topology ==
				    LPFC_TOPOLOGY_LOOP)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t)ndlp->nlp_state << 24) |
				 ((uint32_t)ndlp->nlp_xri << 16) |
				 ((uint32_t)ndlp->nlp_type << 8) |
				 ((uint32_t)ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
					 "0929 FIND node DID "
					 "Data: x%px x%x x%lx x%x x%x x%px\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
					 ndlp->active_rrqs_xri_bitmap);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
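
/* Locked wrapper around __lpfc_findnode_did. */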
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}
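
/* Find the first node in the mapped or unmapped state on the vport. */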
struct lpfc_nodelist *
lpfc_findnode_mapped(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;
	unsigned long iflags;

	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
			data1 = (((uint32_t)ndlp->nlp_state << 24) |
				 ((uint32_t)ndlp->nlp_xri << 16) |
				 ((uint32_t)ndlp->nlp_type << 8) |
				 ((uint32_t)ndlp->nlp_rpi & 0xff));
			spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
					       iflags);
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
					 "2025 FIND node DID MAPPED "
					 "Data: x%px x%x x%lx x%x x%px\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1,
					 ndlp->active_rrqs_xri_bitmap);
			return ndlp;
		}
	}
	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "2026 FIND mapped did NOT FOUND.\n");
	return NULL;
}

struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if (vport->phba->nvmet_support)
			return NULL;
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = lpfc_nlp_init(vport, did);
		if (!ndlp)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "6453 Setup New Node 2B_DISC x%x "
				 "Data:x%lx x%x x%lx\n",
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, vport->fc_flag);

		set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
		return ndlp;
	}

	/* The NVME Target does not want to actively manage an rport.
	 * The goal is to allow the target to reset its state and clear
	 * pending IO in preparation for the initiator to recover.
	 */
	if (test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
	    !test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) {
		if (lpfc_rscn_payload_check(vport, did)) {

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);

			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "6455 Setup RSCN Node 2B_DISC x%x "
					 "Data:x%lx x%x x%lx\n",
					 ndlp->nlp_DID, ndlp->nlp_flag,
					 ndlp->nlp_state, vport->fc_flag);

			/* NVME Target mode waits until rport is known to be
			 * impacted by the RSCN before it transitions.  No
			 * active management - just go to NPR provided the
			 * node had a valid login.
			 */
			if (vport->phba->nvmet_support)
				return ndlp;

			if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
			    ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
				lpfc_disc_state_machine(vport, ndlp, NULL,
							NLP_EVT_DEVICE_RECOVERY);
			}

			set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
		} else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "6456 Skip Setup RSCN Node x%x "
					 "Data:x%lx x%x x%lx\n",
					 ndlp->nlp_DID, ndlp->nlp_flag,
					 ndlp->nlp_state, vport->fc_flag);
			ndlp = NULL;
		}
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "6457 Setup Active Node 2B_DISC x%x "
				 "Data:x%lx x%x x%lx\n",
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, vport->fc_flag);

		/* If the initiator received a PLOGI from this NPort or if the
		 * initiator is already in the process of discovery on it,
		 * there's no need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    (!vport->phba->nvmet_support &&
		     test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)))
			return NULL;

		if (vport->phba->nvmet_support)
			return ndlp;

		/* Moving to NPR state clears unsolicited flags and
		 * allows for rediscovery
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
	}
	return ndlp;
}
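
/* Note (editorial summary, derived from the code above): the routine
 * returns NULL when the DID should not be discovered - NVMET with no
 * existing node, a DID outside the current RSCN payload, or discovery
 * already in progress on the node.  Otherwise it returns the node with
 * NLP_NPR_2B_DISC set so the caller's discovery pass will address it.
 */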

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
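
/* Note on the fallback loop above: lpfcAlpaArray is ordered from the
 * highest-priority ALPA (0xEF at index 0) down to 0x01, so with
 * cfg_scan_down set, j == 0 probes 0xEF first; with it clear,
 * index = FC_MAXLOOP - j - 1 walks the table from the other end.  The
 * port's own ALPA (fc_myDID & 0xff) is always skipped.
 */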

static void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
	struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
	int rc;

	/*
	 * if it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
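
/* Note: as the guard above implies, CLEAR_LA is an SLI-3 era mailbox and
 * SLI-4 ports never issue it.  The MBX_NOT_FINISHED recovery path also
 * re-enables the SLI-3 extra/FCP rings that discovery had stopped.
 */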

/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}

/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
				 "3315 Link is not up %x\n",
				 phba->link_state);
		return;
	}

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery port state x%x "
			 "flg x%lx Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 atomic_read(&vport->fc_plogi_cnt),
			 atomic_read(&vport->fc_adisc_cnt),
			 atomic_read(&vport->fc_npr_cnt));

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !test_bit(FC_PT2PT, &vport->fc_flag) &&
	    !test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_clear_la(phba, vport);
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		lpfc_issue_clear_la(phba, vport);

		if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (atomic_read(&vport->fc_npr_cnt))
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if (vport->fc_rscn_id_cnt == 0 &&
			    !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
				clear_bit(FC_RSCN_MODE, &vport->fc_flag);
				lpfc_can_disctmo(vport);
			} else {
				lpfc_els_handle_rscn(vport);
			}
		}
	}
	return;
}
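
/* Note on ordering in lpfc_disc_start(): ADISCs to NPR nodes go first;
 * only when none are outstanding does the port either register the VPI
 * (SLI-3 NPIV) or fall through to CLEAR_LA and the PLOGI pass, and RSCN
 * bookkeeping is settled last.
 */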

/*
 *  Ignore completion for all IOCBs on tx and txcmpl queue for the ELS
 *  ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;
	u32 ulp_command;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return;

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->ndlp != ndlp)
			continue;

		ulp_command = get_job_cmnd(phba, iocb);

		if (ulp_command == CMD_ELS_REQUEST64_CR ||
		    ulp_command == CMD_XMIT_ELS_RSP64_CX) {

			list_move_tail(&iocb->list, &completions);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->ndlp != ndlp)
			continue;

		ulp_command = get_job_cmnd(phba, iocb);

		if (ulp_command == CMD_ELS_REQUEST64_CR ||
		    ulp_command == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
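
/* Note: the two passes above differ on purpose.  ELS commands still
 * sitting on the txq have never been handed to the adapter, so they are
 * simply moved to a local list and completed with IOSTAT_LOCAL_REJECT;
 * commands on the txcmplq are in flight and must be aborted by iotag so
 * the adapter returns them.
 */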

static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (atomic_read(&vport->fc_plogi_cnt) ||
	    atomic_read(&vport->fc_adisc_cnt)) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}

/*
 * lpfc_notify_xport_npr - notifies xport of node disappearance
 * @vport: Pointer to Virtual Port object.
 *
 * Transitions all ndlps to NPR state.  When lpfc_nlp_set_state
 * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
 * and transport notified that the node is gone.
 * Return Code:
 *	none
 */
static void
lpfc_notify_xport_npr(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
				 nlp_listp) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	}
}

void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
	if (pci_channel_offline(vport->phba->pcidev))
		lpfc_notify_xport_npr(vport);
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(struct timer_list *t)
{
	struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
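
/* Note: the timer callback runs in interrupt context, so it only records
 * WORKER_DISC_TMO (skipping the wakeup if the event was already posted)
 * and defers all real work to lpfc_disc_timeout_handler() on the worker
 * thread.
 */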

static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_sli  *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!test_and_clear_bit(FC_DISC_TMO, &vport->fc_flag))
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
		/*
		 * port_state is identically LPFC_LOCAL_CFG_LINK while
		 * waiting for FAN timeout
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			if (phba->sli_rev <= LPFC_SLI_REV3)
				lpfc_initial_flogi(vport);
			else
				lpfc_issue_init_vfi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp)
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			vport->gidft_inp = 0;
			rc = lpfc_issue_gidft(vport);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0231 RSCN timeout Data: x%x "
					 "x%x x%x x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
					 vport->port_state, vport->gidft_inp);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		fallthrough;
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		if (phba->sli_rev != LPFC_SLI_REV4) {
			psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->sli3_ring[LPFC_FCP_RING].flag &=
				~LPFC_STOP_IOCB_EVENT;
		}
		vport->port_state = LPFC_VPORT_READY;
	}
}
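
/* Note: the handler above is two cascaded switches.  The first keys off
 * vport->port_state (FAN, FLOGI/FDISC, NameServer login/query, node
 * authentication, RSCN) and restarts whatever discovery phase timed out;
 * the second keys off phba->link_state and, on a CLEAR_LA timeout or an
 * unexpected link state, flushes discovery and forces the port READY.
 */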

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->ctx_ndlp = NULL;

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
			 "0004 rpi:%x DID:%x flg:%lx %d x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp);
	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port).
	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
	 * DPRT -> RPRT (vports)
	 */
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
	} else {
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
	}

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
					 "3185 FIND node filter %ps DID "
					 "ndlp x%px did x%x flg x%lx st x%x "
					 "xri x%x type x%x rpi x%x\n",
					 filter, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, ndlp->nlp_state,
					 ndlp->nlp_xri, ndlp->nlp_type,
					 ndlp->nlp_rpi);
			return ndlp;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3186 FIND node filter %ps NOT FOUND.\n", filter);
	return NULL;
}
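
/* The node_filter callback keeps vport node lookups generic.  A minimal
 * sketch of how a new lookup would plug in (hypothetical helper, not part
 * of this driver):
 *
 *	static int lpfc_filter_by_xri(struct lpfc_nodelist *ndlp, void *param)
 *	{
 *		uint16_t *xri = param;
 *
 *		return ndlp->nlp_xri == *xri;
 *	}
 *
 *	ndlp = __lpfc_find_node(vport, lpfc_filter_by_xri, &xri);
 */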

/*
 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 * returns the node list element pointer else return NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 * returns the node element list pointer else return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi
 * is found, the routine returns the node element list pointer else
 * return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ndlp;
}

/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translate is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi.  The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i <= phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		if (i > phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->port_list_lock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->port_list_lock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, flags);
	return NULL;
}
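
/* Note with illustrative values: firmware hands the driver a physical
 * vpi while each vport stores its logical index.  If, say,
 * phba->vpi_ids[3] == 7, a caller passing vpi 7 resolves to logical vpi 3
 * and the port_list walk then matches the vport with vport->vpi == 3.
 */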

struct lpfc_nodelist *
lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	int rpi = LPFC_RPI_ALLOC_ERROR;

	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		rpi = lpfc_sli4_alloc_rpi(vport->phba);
		if (rpi == LPFC_RPI_ALLOC_ERROR)
			return NULL;
	}

	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
	if (!ndlp) {
		if (vport->phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(vport->phba, rpi);
		return NULL;
	}

	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	spin_lock_init(&ndlp->lock);

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = rpi;
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
				 "0007 Init New ndlp x%px, rpi:x%x DID:x%x "
				 "flg:x%lx refcnt:%d\n",
				 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag, kref_read(&ndlp->kref));

		ndlp->active_rrqs_xri_bitmap =
			mempool_alloc(vport->phba->active_rrq_pool,
				      GFP_KERNEL);
		if (ndlp->active_rrqs_xri_bitmap)
			memset(ndlp->active_rrqs_xri_bitmap, 0,
			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return ndlp;
}
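
/* Note: on SLI-4 the RPI is allocated before the node itself so a pool
 * failure can unwind cleanly - lpfc_sli4_free_rpi() is called if the
 * nodelist allocation fails, and the rpi is only committed to
 * ndlp->nlp_rpi once the node is initialized.
 */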

/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);
	struct lpfc_vport *vport = ndlp->vport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%lx type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
			 __func__, ndlp, ndlp->nlp_DID,
			 kref_read(&ndlp->kref), ndlp->nlp_rpi);

	/* remove ndlp from action. */
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	lpfc_cleanup_node(vport, ndlp);

	/* All nodes are initialized with an RPI that needs to be released
	 * now. All references are gone and the node has been dequeued.
	 */
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
	}

	/* The node is not freed back to memory, it is released to a pool so
	 * the node fields need to be cleaned up.
	 */
	ndlp->vport = NULL;
	ndlp->nlp_state = NLP_STE_FREED_NODE;
	ndlp->nlp_flag = 0;
	ndlp->fc4_xpt_flags = 0;

	/* free ndlp memory for final ndlp release */
	if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
		mempool_free(ndlp->active_rrqs_xri_bitmap,
			     ndlp->phba->active_rrq_pool);
	mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}

/* This routine bumps the reference count for an ndlp structure to ensure
 * that one discovery thread won't free an ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%lx refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			kref_read(&ndlp->kref));

		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		spin_lock_irqsave(&ndlp->lock, flags);
		if (!kref_get_unless_zero(&ndlp->kref)) {
			spin_unlock_irqrestore(&ndlp->lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
					 "0276 %s: ndlp:x%px refcnt:%d\n",
					 __func__, (void *)ndlp,
					 kref_read(&ndlp->kref));
			return NULL;
		}
		spin_unlock_irqrestore(&ndlp->lock, flags);
	} else {
		WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
	}

	return ndlp;
}

/* This routine decrements the reference count for an ndlp structure. If the
 * count goes to 0, this indicates the associated nodelist should be freed.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node put:        did:x%x flg:x%lx refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			kref_read(&ndlp->kref));
	} else {
		WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
	}

	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
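
/* Note on the reference pattern above: lpfc_nlp_get() uses
 * kref_get_unless_zero() under ndlp->lock so a node whose final put is in
 * flight cannot be resurrected, and the final kref_put() in lpfc_nlp_put()
 * funnels every node through lpfc_nlp_release() exactly once.
 */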

/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		/*
		 * IF the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!test_bit(FC_VPORT_CVL_RCVD, &vports[i]->fc_flag)) {
			ret = 1;
			goto out;
		}
		spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
						       iflags);
				goto out;
			} else if (test_bit(NLP_RPI_REGISTERED,
					    &ndlp->nlp_flag)) {
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO,
						LOG_NODE | LOG_DISCOVERY,
						"2624 RPI %x DID %x flag %lx "
						"still logged in\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_flag);
			}
		}
		spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}

/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2555 UNREG_VFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	clear_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag);
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2550 UNREG_FCFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * VFIs.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i = 0, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
			set_bit(FC_VPORT_NEEDS_INIT_VPI, &vports[i]->fc_flag);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
		if (ndlp)
			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
		lpfc_cleanup_pending_mbox(phba->pport);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(phba->pport);
		lpfc_mbx_unreg_vpi(phba->pport);
		shost = lpfc_shost_from_vport(phba->pport);
		spin_lock_irq(shost->host_lock);
		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		set_bit(FC_VPORT_NEEDS_INIT_VPI, &phba->pport->fc_flag);
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}
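
/* Note: teardown order matters here - RPIs are unregistered first (only
 * if the FCF is actually in use), then each vport's VPI with
 * FC_VPORT_NEEDS_INIT_VPI set for re-init, and the physical port's VFI
 * last, mirroring the registration hierarchy RPI -> VPI -> VFI.
 */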

/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues synchronous unregister FCF mailbox command to HBA to
 * unregister the currently registered FCF record. The driver does not reset
 * the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2551 UNREG_FCFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}

/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. This function
 * also tries to find another FCF for discovery by rescanning the HBA FCF
 * table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag = 0;
	spin_unlock_irq(&phba->hbalock);
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if (test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
	    phba->link_state < LPFC_LINK_UP)
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there is any connected remote port for the FCF and,
 * if all the devices are disconnected, unregisters the FCFI.
 * This function also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    phba->pport->port_state == LPFC_FLOGI) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}

/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
		       uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
				     GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2566 Failed to allocate connection"
					" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
		       sizeof(struct lpfc_fcf_conn_rec));
		list_add_tail(&conn_entry->list,
			      &phba->fcf_conn_rec_list);
	}

	if (!list_empty(&phba->fcf_conn_rec_list)) {
		i = 0;
		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
				    list) {
			conn_rec = &conn_entry->conn_rec;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3345 FCF connection list rec[%02d]: "
					"flags:x%04x, vtag:x%04x, "
					"fabric_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x, "
					"switch_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x\n", i++,
					conn_rec->flags, conn_rec->vlan_tag,
					conn_rec->fabric_name[0],
					conn_rec->fabric_name[1],
					conn_rec->fabric_name[2],
					conn_rec->fabric_name[3],
					conn_rec->fabric_name[4],
					conn_rec->fabric_name[5],
					conn_rec->fabric_name[6],
					conn_rec->fabric_name[7],
					conn_rec->switch_name[0],
					conn_rec->switch_name[1],
					conn_rec->switch_name[2],
					conn_rec->switch_name[3],
					conn_rec->switch_name[4],
					conn_rec->switch_name[5],
					conn_rec->switch_name[6],
					conn_rec->switch_name[7]);
		}
	}
}

/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
		     uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
				0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; else it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
	    (size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
	       <= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
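
/* Worked example for the TLV walk above: each region-23 record has a
 * one-word header with the type in byte 0 and the payload length in
 * 32-bit words in byte 1.  A record of, say, type 0xA0 with rec_length 2
 * therefore spans 4 + 2 * 4 = 12 bytes, and the loop advances offset by
 * exactly that amount to land on the next record header.
 */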

/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		     uint8_t *buff,
		     uint32_t size)
{
	uint32_t offset = 0;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}

/**
 * lpfc_error_lost_link - IO failure from link event or FW reset check.
 *
 * @vport: Pointer to lpfc_vport data structure.
 * @ulp_status: IO completion status.
 * @ulp_word4: Reason code for the ulp_status.
 *
 * This function evaluates the ulp_status and ulp_word4 values
 * for specific error values that indicate an internal link fault
 * or fw reset event for the completing IO.  Callers require this
 * common data to decide next steps on the IO.
 *
 * Return:
 * false - No link or reset error occurred.
 * true - A link or reset error occurred.
 */
bool
lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status, u32 ulp_word4)
{
	/* Mask off the extra port data to get just the reason code. */
	u32 rsn_code = IOERR_PARAM_MASK & ulp_word4;

	if (ulp_status == IOSTAT_LOCAL_REJECT &&
	    (rsn_code == IOERR_SLI_ABORTED ||
	     rsn_code == IOERR_LINK_DOWN ||
	     rsn_code == IOERR_SLI_DOWN)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI | LOG_ELS,
				 "0408 Report link error true: <x%x:x%x>\n",
				 ulp_status, ulp_word4);