/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
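
/* For reference: these are the 126 valid FC-AL arbitrated-loop physical
 * addresses (AL_PAs), listed from 0xEF down to 0x01; indexing into this
 * table supplies the scsid assignment mentioned below.
 */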
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}

/*
 * This function will be called when the dev_loss_tmo timer fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"6789 rport name %llx != node port name %llx",
				rport->port_name,
				wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"6790 rport name %llx dev_loss_evt pending",
				rport->port_name);
		return;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	spin_unlock_irq(shost->host_lock);

	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);

	spin_lock_irq(&phba->hbalock);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still using the FCF; otherwise it
 * returns 0 when no remote node is using the FCF at the time the devloss
 * timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct Scsi_Host *shost;
	uint8_t *name;
	int put_node;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;
	vport = ndlp->vport;
	shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irq(shost->host_lock);

	if (!rport)
		return fcf_inuse;

	name = (uint8_t *) &ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/*
	 * lpfc_nlp_remove if reached with dangling rport drops the
	 * reference. To make sure that does not happen clear rport
	 * pointer in ndlp before lpfc_nlp_put.
	 */
	rdata = rport->dd_data;

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		put_device(&rport->dev);

		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	put_node = rdata->pnode != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	put_device(&rport->dev);

	if (ndlp->nlp_type & NLP_FABRIC)
		return fcf_inuse;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of FCF, when this
 * routine is invoked, it shall be guaranteed that none of the remote nodes
 * are still using the FCF. When devloss timeout happens to the last remote
 * node using the FCF, if the FIP engine is neither in FCF table scan
 * process nor roundrobin failover process, the in-use FCF shall be
 * unregistered. If the FIP engine is in FCF discovery process, the devloss
 * timeout state shall be set for either the FCF table scan process or
 * roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from functions which need to post events
 * from interrupt context. It allocates the data structure required
 * for posting an event. It also keeps track of the number of events
 * pending, to prevent an event storm when too many events are
 * outstanding.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory on them */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure allocated for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt)
{
	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. It posts the event to the fc
 * transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			if (phba->link_state >= LPFC_LINK_UP) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!list_empty(&pring->txq)))
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
			((vport->port_type == LPFC_NPIV_PORT) &&
			(ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
		(vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
		lpfc_initial_flogi(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;

	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				&phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA
 * FCF record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
				 struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
		       uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
				 new_fcf_record);
}

/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI &&
		    phba->pport->fc_flag & FC_FABRIC) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}
}

/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, else it returns
 * zero. If this FCF record can be used for SAN discovery, the boot_flag
 * will indicate if this FCF is used by boot bios and addr_mode will
 * indicate the addressing mode to be used for this FCF when the function
 * returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver connects
	 * to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record reports a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					     new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					    new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA required but FCF does not support it,
			 * continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA required but FCF does not support it,
			 * continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF supports the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}

/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else return 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on which FCF
 * record to use through a sequence of @fcf_cnt eligible FCF records with
 * equal probability. To perform integer manipulation of random numbers
 * with size uint32_t, the lower 16 bits of the 32-bit random number
 * returned from prandom_u32() are taken as the generated random number.
 *
 * Returns true when the outcome is that the newly read FCF record should
 * be chosen; otherwise, returns false when the outcome is to keep the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = 0xFFFF & prandom_u32();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}

/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status, if_type;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	lpfc_sli_pcimem_bcopy(shdr, shdr,
			      sizeof(union lpfc_sli4_cfg_shdr));
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
		    if_type == LPFC_SLI_INTF_IF_TYPE_2)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
				offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFCF_SOL       : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}
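
/*
 * Visibility note (based on the usual lpfc logging scheme, stated here as
 * an assumption rather than quoted from this file): the record dump above
 * is emitted at KERN_INFO, so it only reaches the log when the LOG_FIP bit
 * is set in the driver's log-verbose mask, e.g. via the lpfc_log_verbose
 * module parameter; lpfc_printf_log() filters informational messages on
 * that mask.
 */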
/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs matching test of a new FCF record against an existing
 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
 * will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all of the fields match, otherwise returns false.
 **/
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}
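
/*
 * Example invocation (taken from the scan completion handler below): a
 * record that failed the connection-list check can still describe the
 * in-use FCF, so it is re-tested with the vlan criterion disabled:
 *
 *	if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
 *				       new_fcf_record, LPFC_FCOE_IGNORE_VID))
 *		... treat the record as the in-use FCF ...
 */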
/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin FCF failover to the next FCF index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 * Return: 0 for continue retrying flogi on currently registered fcf;
 *         1 for stop flogi on currently registered fcf;
 **/
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	int rc;
	struct lpfc_hba *phba = vport->phba;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
			goto stop_flogi_current_fcf;
		}
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
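
/*
 * Return-value sketch (illustrative; the concrete caller lives in the ELS
 * FLOGI completion path in lpfc_els.c): a caller reacts to the two outcomes
 * roughly as
 *
 *	rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
 *	if (rc)
 *		... stop FLOGI on the currently registered FCF ...
 *	else
 *		... keep retrying FLOGI on the currently registered FCF ...
 */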
/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list, it is removed from the list and the flag is
 * cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the roundrobin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to the fcf record being added
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 * returns:
 * 0=success 1=failure
 **/
static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
				      uint16_t fcf_index,
				      struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3059 adding idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_record->fip_priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
			       sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
		    fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
					 &phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					 fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
			   next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;
	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
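
/*
 * Worked example (hypothetical priority values, for illustration only):
 * with the list holding priorities {2, 2, 5} and the rr_bmask tracking the
 * head level 2, adding a record of priority 2 bumps eligible_fcf_cnt and
 * sets its bmask bit; adding priority 1 first zeroes the bmask (a new head
 * level) and resets eligible_fcf_cnt to 1; adding priority 4 is spliced
 * between the 2s and the 5 by the list walk without touching the bmask.
 */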
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kick-starts
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id;
	bool select_new_fcf;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for roundrobin FCF failover.
	 */
	if (!rc) {
		lpfc_sli4_fcf_pri_list_del(phba,
					   bf_get(lpfc_fcf_record_fcf_index,
						  new_fcf_record));
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF (x%x) failed connection "
				"list check: (x%x/x%x/%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_sol,
				       new_fcf_record));
		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
			    phba->fcf.current_rec.fcf_indx) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2862 FCF (x%x) matches property "
					"of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
				goto read_next_fcf;
			}
			/*
			 * In case the current in-use FCF record becomes
			 * invalid/unavailable during FCF discovery that
			 * was not triggered by fast FCF failover process,
			 * treat it as fast FCF failover.
			 */
			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"2835 Invalid in-use FCF "
						"(x%x), enter FCF failover "
						"table scan.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_mbox_cmd_free(phba, mboxq);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
		}
		goto read_next_fcf;
	} else {
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
						new_fcf_record);
		if (rc)
			goto read_next_fcf;
	}

	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, vlan_id)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
			    phba->fcf.current_rec.fcf_indx) {
				phba->fcf.fcf_flag |= FCF_AVAILABLE;
				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
					/* Stop FCF redisc wait timer */
					__lpfc_sli4_stop_fcf_redisc_wait_timer(
									phba);
				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
					/* Fast failover, mark completed */
					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2836 New FCF matches in-use "
						"FCF (x%x), port_state:x%x, "
						"fc_flag:x%x\n",
						phba->fcf.current_rec.fcf_indx,
						phba->pport->port_state,
						phba->pport->fc_flag);
				goto out;
			} else
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2863 New FCF (x%x) matches "
					"property of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2837 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2838 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2839 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
			}
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2840 Update initial FCF candidate "
				"with FCF (x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record));
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF found: "
					       "(x%x/x%x)\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				spin_lock_irq(&phba->hbalock);
				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
					phba->hba_flag &= ~FCF_TS_INPROG;
					spin_unlock_irq(&phba->hbalock);
					/* Unregister in-use FCF and rescan */
					lpfc_printf_log(phba, KERN_INFO,
							LOG_FIP,
							"2864 On devloss tmo "
							"unreg in-use FCF and "
							"rescan FCF table\n");
					lpfc_unregister_fcf_rescan(phba);
					return;
				}
				/*
				 * Let next new FCF event trigger fast failover
				 */
				phba->hba_flag &= ~FCF_TS_INPROG;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2842 Replace in-use FCF (x%x) "
					"with failover FCF (x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					phba->fcf.failover_rec.fcf_indx);
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/*
			 * Mark the fast FCF failover rediscovery completed
			 * and the start of the first round of the roundrobin
			 * failover.
			 */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;

			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
			    phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * In case the current in-use FCF record no
				 * longer existed during FCF discovery that
				 * was not triggered by fast FCF failover
				 * process, treat it as fast FCF failover.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2841 In-use FCF record (x%x) "
						"not reported, entering fast "
						"FCF failover mode scanning.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
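
/*
 * Flag interplay summary (a recap of the handler above, not new behavior):
 * FCF_TS_INPROG guards a single table scan at a time; FCF_REDISC_FOV routes
 * candidate updates into phba->fcf.failover_rec instead of current_rec; and
 * FCF_AVAILABLE records that at least one suitable candidate has been
 * chosen so later records compete on boot flag, priority, and random
 * selection rather than winning by default.
 */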
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure roundrobin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * fails through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 */
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index, fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, stop the roundrobin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		phba->hba_flag &= ~FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		goto out;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record. "
				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
				phba->fcf.fcf_flag);
		lpfc_unregister_fcf_rescan(phba);
		goto out;
	}

	/* Get the needed parameters from FCF record */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"from roundrobin bmask\n", fcf_index);
		/* Clear roundrobin bmask bit for ineligible FCF */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		/* Perform next round of roundrobin FCF failover */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
		if (rc)
			goto out;
		goto error_out;
	}

	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2760 Perform FLOGI roundrobin FCF failover: "
				"FCF (x%x) back to FCF (x%x)\n",
				phba->fcf.current_rec.fcf_indx, fcf_index);
		/* Wait 500 ms before retrying FLOGI to current FCF */
		msleep(500);
		lpfc_issue_init_vfi(phba->pport);
		goto out;
	}

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
			phba->fcf.failover_rec.fcf_indx, fcf_index);
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 Perform FLOGI roundrobin FCF failover: FCF "
			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);

error_out:
	lpfc_register_fcf(phba);
out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
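
/*
 * Note on the 500 ms pause above (illustrative rationale, not stated in
 * this file): when roundrobin selection lands back on the very FCF that
 * just failed FLOGI, an immediate retry tends to fail for the same
 * transient reason, so a short settle time is inserted before the
 * init_vfi/FLOGI sequence is restarted.
 */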
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vfi mailbox command.
 */
static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI not supported on interface type 0, just do the flogi
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2891 Init VFI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI and
 * VPI for the physical port.
 */
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2892 Failed to allocate "
				 "init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_DISCOVERY,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc, vpi;

	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
		vpi = lpfc_alloc_vpi(vport->phba);
		if (!vpi) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_NODE,
					 "3303 Failed to obtain vport vpi\n");
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			return;
		}
		vport->vpi = vpi;
	}

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2607 Failed to allocate "
				 "init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
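
/*
 * Usage sketch: lpfc_start_fdiscs() below calls this routine for any vport
 * still flagged FC_VPORT_NEEDS_INIT_VPI; the FDISC itself is then issued
 * from the lpfc_init_vpi_cmpl() completion above rather than inline, so
 * the mailbox round trip never blocks the vport iteration.
 */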
/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* If the VFI is already registered, there is nothing else to do
	 * Unless this was a VFI update and we are in PT2PT mode, then
	 * we should drop through to set the port state to ready.
	 */
	if (vport->fc_flag & FC_VFI_REGISTERED)
		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
		      vport->fc_flag & FC_PT2PT))
			goto out_free_mem;

	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* In case SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
			 "alpacnt:%d LinkState:%x topology:%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_myDID,
			 vport->phba->alpa_map[0],
			 phba->link_state, phba->fc_topology);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or for NPort pt2pt,
		 * just start discovery and we are done.
		 */
		if ((vport->fc_flag & FC_PT2PT) ||
		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {

			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			if (vport->fc_flag & FC_PT2PT)
				vport->port_state = LPFC_VPORT_READY;
			else
				lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;
	struct serv_parm *sp = &vport->fc_sparam;
	uint32_t ed_tov;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));

	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		ed_tov = (ed_tov + 999999) / 1000000;

	phba->fc_edtov = ed_tov;
	phba->fc_ratov = (2 * ed_tov) / 1000;
	if (phba->fc_ratov < FF_DEF_RATOV) {
		/* RA_TOV should be at least 10sec for initial flogi */
		phba->fc_ratov = FF_DEF_RATOV;
	}

	lpfc_update_vport_wwn(vport);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
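
/*
 * Worked example of the timeout math above (hypothetical input value): an
 * E_D_TOV of 2000000000 ns with edtovResolution set becomes
 * (2000000000 + 999999) / 1000000 = 2000 ms, so fc_edtov = 2000 and
 * fc_ratov = (2 * 2000) / 1000 = 4 seconds, which the check then raises
 * to FF_DEF_RATOV for the initial FLOGI. The "+ 999999" makes the integer
 * division round up, so sub-millisecond remainders are never dropped.
 */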
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	struct Scsi_Host *shost;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;
	uint32_t fc_flags = 0;

	spin_lock_irq(&phba->hbalock);
	switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
	case LPFC_LINK_SPEED_1GHZ:
	case LPFC_LINK_SPEED_2GHZ:
	case LPFC_LINK_SPEED_4GHZ:
	case LPFC_LINK_SPEED_8GHZ:
	case LPFC_LINK_SPEED_10GHZ:
	case LPFC_LINK_SPEED_16GHZ:
	case LPFC_LINK_SPEED_32GHZ:
		phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
		break;
	default:
		phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	}

	if (phba->fc_topology &&
	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Topology changed was 0x%x is 0x%x\n",
				phba->fc_topology,
				bf_get(lpfc_mbx_read_top_topology, la));
		phba->fc_topology_changed = 1;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	shost = lpfc_shost_from_vport(vport);
	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1309 Link Up Event npiv not supported in loop "
				"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la))
			fc_flags |= FC_LBIT;

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		fc_flags |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	if (fc_flags) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= fc_flags;
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_linkup(phba);

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					     GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_mbx_read_top *la;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Unblock ELS traffic */
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (bf_get(lpfc_mbx_read_top_pb, la))
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if (phba->fc_eventTag <= la->eventTag) {
		phba->fc_stat.LinkMultiEvent++;
		if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	if (phba->sli_rev < LPFC_SLI_REV4) {
		spin_lock_irq(&phba->hbalock);
		if (bf_get(lpfc_mbx_read_top_mm, la))
			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
		else
			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
		spin_unlock_irq(&phba->hbalock);
	}

	phba->link_events++;
	if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_mm, la),
					bf_get(lpfc_mbx_read_top_fa, la),
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
		   LPFC_ATT_LINK_DOWN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				bf_get(lpfc_mbx_read_top_mm, la),
				bf_get(lpfc_mbx_read_top_fa, la));
		lpfc_mbx_issue_link_down(phba);
	}
	if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
	    ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    bf_get(lpfc_mbx_read_top_fa, la)) {
		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));
	}

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
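
/*
 * Dispatch summary (a recap of the handler above): LPFC_ATT_LINK_UP with
 * Menlo maintenance clear goes to lpfc_mbx_process_link_up();
 * LPFC_ATT_LINK_DOWN goes to lpfc_mbx_issue_link_down(); and a link-up
 * attention while LPFC_MENLO_MAINT is set is handled as a pseudo
 * link-down so that any command waiting on wait_4_mlo_m_q can be woken.
 */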
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 atomic_read(&ndlp->kref.refcount),
			 ndlp->nlp_usg_map, ndlp);
	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);
	} else
		/* Good status, call state machine */
		lpfc_disc_state_machine(vport, ndlp, pmb,
					NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
					      LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 */
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	do {
		/* free dma buffer from previous round */
		if (pmb->context1) {
			mp = (struct lpfc_dmabuf *)pmb->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
						       LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *)pmb->context1;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
			    sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
					      vport_buff + offset,
					      byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		 offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
	    ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
	     != VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0545 lpfc_create_static_vport bad"
				" information header 0x%x 0x%x\n",
				le32_to_cpu(vport_info->signature),
				le32_to_cpu(vport_info->rev) &
				VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (mbx_wait_rc != MBX_TIMEOUT) {
		if (pmb->context1) {
			mp = (struct lpfc_dmabuf *)pmb->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}
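
/*
 * Buffer accounting sketch for the DUMP loop above: each round copies at
 * most sizeof(struct static_vport_info) - offset bytes into vport_buff, so
 * the do/while terminates either when the mailbox returns zero bytes or
 * when the staging buffer is full, and offset always equals the number of
 * valid bytes accumulated so far.
 */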
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * reference to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the reference
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* when the physical port receives a logo, do not start
		 * vport discovery */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else {
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current reference to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
out:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 atomic_read(&ndlp->kref.refcount),
			 ndlp->nlp_usg_map, ndlp);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
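
/*
 * Register (or re-register) @ndlp with the FC transport as a remote port.
 * Any reference held from a previous registration is dropped first, the add
 * is skipped while the vport is unloading, and the assigned SCSI target id
 * is recorded when it falls within the supported range.
 */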
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	rport = ndlp->rport;
	if (rport) {
		rdata = rport->dd_data;
		/* break the link before dropping the ref */
		ndlp->rport = NULL;
		if (rdata && rdata->pnode == ndlp)
			lpfc_nlp_put(ndlp);
		rdata->pnode = NULL;
		/* drop reference for earlier registration */
		put_device(&rport->dev);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3183 rport register x%06x, rport %p role x%x\n",
			 ndlp->nlp_DID, rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
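
/*
 * Remove the FC transport remote port associated with @ndlp.
 */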
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3184 rport unregister x%06x, rport %p\n",
			 ndlp->nlp_DID, rport);

	fc_remote_port_delete(rport);

	return;
}
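
/*
 * Adjust the per-state node counters on @vport by @count under the host
 * lock.
 */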
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		if (vport->fc_npr_cnt == 0 && count == -1)
			vport->fc_npr_cnt = 0;
		else
			vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
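
/*
 * Post state-transition bookkeeping: (un)register the remote port with the
 * FC transport as the node leaves or enters the mapped/unmapped states,
 * allocate the statistics buffer for newly mapped targets, and demote nodes
 * without a usable SCSI target id back to UNMAPPED.
 */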
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state == NLP_STE_MAPPED_NODE) &&
		(vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_LOGO_ISSUE] = "LOGO",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
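
/*
 * Transition @ndlp to @state: log the change, keep the per-state vport
 * counters consistent, link the node onto the vport list if needed, and run
 * the post-transition cleanup.
 */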
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
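
/*
 * Put @ndlp on the vport node list if it is not already linked.
 */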
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp because the
 * life-span of @ndlp may go beyond the existence of @vport: the final
 * release of ndlp is determined by its reference count, and the operation
 * on @ndlp needs the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}
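
/*
 * Bring an inactive @ndlp back into use: re-initialize it while preserving
 * its DID (and, on SLI4, its active RRQ XRI bitmap), allocate a new RPI on
 * SLI4, and move the node to @state.
 */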
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;
	if (phba->sli_rev == LPFC_SLI_REV4)
		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	if (phba->sli_rev == LPFC_SLI_REV4)
		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 atomic_read(&ndlp->kref.refcount),
				 ndlp->nlp_usg_map, ndlp);
	}

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:       did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_cleanup_vports_rrqs(vport, ndlp);
		lpfc_unreg_rpi(vport, ndlp);
	}

	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if its running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we
				 * are looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the UNREG_RPI.
 **/
static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)(pmb->context1);
	if (!ndlp)
		return;
	lpfc_issue_els_logo(vport, ndlp, 0);
	mempool_free(pmb, phba->mbox_mem_pool);
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc, acc_plogi = 1;
	uint16_t rpi;

	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
					 "3366 RPI x%x needs to be "
					 "unregistered nlp_flag x%x "
					 "did x%x\n",
					 ndlp->nlp_rpi, ndlp->nlp_flag,
					 ndlp->nlp_DID);
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
				mbox->context1 = ndlp;
				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
			} else {
				if (phba->sli_rev == LPFC_SLI_REV4 &&
				    (!(vport->load_flag & FC_UNLOADING)) &&
				    (bf_get(lpfc_sli_intf_if_type,
				     &phba->sli4_hba.sli_intf) ==
				      LPFC_SLI_INTF_IF_TYPE_2)) {
					mbox->context1 = lpfc_nlp_get(ndlp);
					mbox->mbox_cmpl =
						lpfc_sli4_unreg_rpi_cmpl_clr;
					/*
					 * accept PLOGIs after unreg_rpi_cmpl
					 */
					acc_plogi = 0;
				} else
					mbox->mbox_cmpl =
						lpfc_sli_def_mbox_cmpl;
			}

			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				acc_plogi = 1;
			}
		}
		lpfc_no_rpi(phba, ndlp);

		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		if (acc_plogi)
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		return 1;
	}
	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
	return 0;
}
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"2884 Vport array allocation failed \n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
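
/*
 * Unregister every RPI logged in on @vport: delegate to the SLI4 helper on
 * SLI4 ports, otherwise issue a single UNREG_LOGIN covering all RPIs.
 */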
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
			(ndlp != (struct lpfc_nodelist *) mb->context2))
			continue;

		mb->context2 = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
				 atomic_read(&ndlp->kref.refcount),
				 ndlp->nlp_usg_map, ndlp);
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = ndlp;
				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * ndlp->rport must be set to NULL before it reaches here
	 * i.e. break rport/node link before doing lpfc_nlp_put for
	 * registered rport and then drop the reference of rport.
	 */
	if (ndlp->rport) {
		/*
		 * extra lpfc_nlp_put dropped the reference of ndlp
		 * for registered rport so need to cleanup rport
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0940 removed node x%p DID x%x "
				" rport not null %p\n",
				ndlp, ndlp->nlp_DID, ndlp->rport);
		rport = ndlp->rport;
		rdata = rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		put_device(&rport->dev);
	}
}
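
/*
 * Check whether @did refers to @ndlp, either by direct match or by the
 * private-loop conventions where the domain/area bytes are zero.
 */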
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			/* This code is supposed to match the ID
			 * for a private loop device that is
			 * connect to fl_port. But we need to
			 * check that the port did not just go
			 * from pt2pt to fabric or we could end
			 * up matching ndlp->nlp_DID 000001 to
			 * fabric DID 0x20101
			 */
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id &&
				    vport->phba->fc_topology ==
				    LPFC_TOPOLOGY_LOOP)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x %p\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1,
					 ndlp->active_rrqs_xri_bitmap);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}
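
/*
 * Find or create the node for @did and mark it for discovery (NPR state,
 * NLP_NPR_2B_DISC).  Returns NULL when the node does not need to be
 * discovered, e.g. it is outside the current RSCN payload or already in
 * the middle of a login.
 */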
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * if it's not a physical port or if we already send
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
		(phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
				 "3315 Link is not up %x\n",
				 phba->link_state);
		return;
	}

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_clear_la(phba, vport);
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 *  Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 *  ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t     *icmd;
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
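
/*
 * Worker-thread handler for the discovery timer: acts on the current vport
 * port_state (FAN, FLOGI/FDISC, NameServer login and query, node
 * authentication, RSCN) and, when an unrecoverable CLEAR_LA or link error is
 * detected, flushes the discovery list, re-enables IOCB events on the rings
 * and marks the vport ready.
 */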
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_sli  *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting
	 * for FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			if (phba->sli_rev <= LPFC_SLI_REV3)
				lpfc_initial_flogi(vport);
			else
				lpfc_issue_init_vfi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 atomic_read(&ndlp->kref.refcount),
			 ndlp->nlp_usg_map, ndlp);
	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */

	if (vport->cfg_fdmi_on & LPFC_FDMI_REG_DELAY)
		mod_timer(&vport->fc_fdmitmo,
			  jiffies + msecs_to_jiffies(1000 * 60));
	else
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	/* check for active node */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "3185 FIND node filter %p DID "
					 "Data: x%p x%x x%x\n",
					 filter, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
			return ndlp;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3186 FIND node filter %p NOT FOUND.\n", filter);
	return NULL;
}
/*
 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 * returns the node list element pointer else return NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
/*
 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 * returns the node element list pointer else return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi
 * is found, the routine returns the node element list pointer else
 * return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translate is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi.  The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i < phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		if (i >= phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return NULL;
}
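
/*
 * One-time initialization of a freshly allocated @ndlp: zero the node, set
 * up its fields for @did, and on SLI4 allocate an RPI and the active RRQ
 * XRI bitmap.
 */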
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 atomic_read(&ndlp->kref.refcount),
				 ndlp->nlp_usg_map, ndlp);

		ndlp->active_rrqs_xri_bitmap =
				mempool_alloc(vport->phba->active_rrq_pool,
					      GFP_KERNEL);
		if (ndlp->active_rrqs_xri_bitmap)
			memset(ndlp->active_rrqs_xri_bitmap, 0,
			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p did %x "
			"usgmap:x%x refcnt:%d rpi:%x\n",
			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi);

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		if (phba->sli_rev == LPFC_SLI_REV4)
			mempool_free(ndlp->active_rrqs_xri_bitmap,
				     ndlp->phba->active_rrq_pool);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, kref_put returns 1 when it decrements a reference count
	 * that was 1: it invokes the release callback, but leaves the
	 * stored reference count at 1 rather than performing the final
	 * decrement itself. Otherwise, it decrements the reference count
	 * and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}

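/*
 * Usage sketch (illustrative only): the get/put discipline the comments
 * above describe. A thread takes a reference before touching the node and
 * drops it when done; lpfc_nlp_release() runs only on the final put.
 */
#if 0
static void example_use_node(struct lpfc_nodelist *ndlp)
{
	if (!lpfc_nlp_get(ndlp))
		return;		/* node is already being released */
	/* ... ndlp may be dereferenced safely here ... */
	lpfc_nlp_put(ndlp);
}
#endif
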
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}

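/*
 * Usage sketch (illustrative only): a cleanup path can use the return value
 * to learn whether the node actually went away and clear its local pointer
 * on success. The helper name and shape are assumptions.
 */
#if 0
static struct lpfc_nodelist *
example_drop_if_unused(struct lpfc_nodelist *ndlp)
{
	if (lpfc_nlp_not_used(ndlp))
		return NULL;	/* node was released; do not touch it again */
	return ndlp;		/* still referenced elsewhere */
}
#endif
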
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * If the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
						"2624 RPI %x DID %x flag %x "
						"still logged in\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_flag);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}

/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2555 UNREG_VFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2550 UNREG_FCFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It unregisters, in order, RPIs, VPIs, and VFIs.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i = 0, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
		if (ndlp)
			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
		lpfc_cleanup_pending_mbox(phba->pport);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(phba->pport);
		lpfc_mbx_unreg_vpi(phba->pport);
		shost = lpfc_shost_from_vport(phba->pport);
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}

/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues a synchronous unregister FCF mailbox command to the
 * HBA to unregister the currently registered FCF record. The driver does
 * not reset the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}

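/*
 * Usage sketch (illustrative only): as noted above, this routine does not
 * touch the driver FCF state flags, so a caller resets them itself after a
 * successful issue (compare lpfc_unregister_fcf_rescan() below). The helper
 * name is an assumption.
 */
#if 0
static void example_unreg_and_reset(struct lpfc_hba *phba)
{
	if (lpfc_sli4_unregister_fcf(phba))
		return;			/* mailbox not issued; keep state */
	phba->fcf.fcf_flag = 0;		/* caller-side reset on success */
	phba->fcf.current_rec.flag = 0;
}
#endif
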
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
 * This function also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}

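/*
 * Flow sketch (illustrative only): the teardown order implemented by the
 * routines above, collapsed into one hypothetical wrapper for readability.
 * Each call is a real function in this file; the wrapper itself is not.
 */
#if 0
static void example_fcf_teardown(struct lpfc_hba *phba)
{
	if (lpfc_fcf_inuse(phba))
		return;		/* some rport is still logged in */
	/* unregisters RPIs, VPIs and the VFI, then the FCF, then rescans */
	lpfc_unregister_fcf_rescan(phba);
}
#endif
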
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
		       uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t) /
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
				     GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}

	if (!list_empty(&phba->fcf_conn_rec_list)) {
		i = 0;
		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
				    list) {
			conn_rec = &conn_entry->conn_rec;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3345 FCF connection list rec[%02d]: "
					"flags:x%04x, vtag:x%04x, "
					"fabric_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x, "
					"switch_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x\n", i++,
					conn_rec->flags, conn_rec->vlan_tag,
					conn_rec->fabric_name[0],
					conn_rec->fabric_name[1],
					conn_rec->fabric_name[2],
					conn_rec->fabric_name[3],
					conn_rec->fabric_name[4],
					conn_rec->fabric_name[5],
					conn_rec->fabric_name[6],
					conn_rec->fabric_name[7],
					conn_rec->switch_name[0],
					conn_rec->switch_name[1],
					conn_rec->switch_name[2],
					conn_rec->switch_name[3],
					conn_rec->switch_name[4],
					conn_rec->switch_name[5],
					conn_rec->switch_name[6],
					conn_rec->switch_name[7]);
		}
	}
}

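/*
 * Arithmetic sketch (illustrative only): conn_hdr->length counts 32-bit
 * words of record payload, so the record count computed above is
 * (length * 4) / sizeof(struct lpfc_fcf_conn_rec). Spelled out:
 */
#if 0
static uint32_t example_conn_rec_count(struct lpfc_fcf_conn_hdr *conn_hdr)
{
	uint32_t payload_bytes = conn_hdr->length * sizeof(uint32_t);

	return payload_bytes / sizeof(struct lpfc_fcf_conn_rec);
}
#endif
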
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
		     uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
				0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}

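/*
 * Arithmetic sketch (illustrative only): the stride used by the walk above.
 * Each TLV record is one header word plus rec_length payload words, with
 * the length byte sitting at offset 1 of the record header.
 */
#if 0
static uint32_t example_tlv_record_bytes(const uint8_t *rec)
{
	uint32_t rec_length = rec[1];	/* payload length in words */

	return sizeof(uint32_t) + rec_length * sizeof(uint32_t);
}
#endif
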
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		     uint8_t *buff,
		     uint32_t size)
{
	uint32_t offset = 0;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words, the signature and version
	 * cannot be verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
			size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
			size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}
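
/*
 * Layout sketch (illustrative only), inferred from the checks above rather
 * than from a specification:
 *
 *   byte 0..3 : 4-byte region signature (LPFC_REGION23_SIGNATURE)
 *   byte 4    : version byte (LPFC_REGION23_VERSION), word aligned
 *   byte 8..  : TLV records (type byte, length byte in words, payload),
 *               terminated by an LPFC_REGION23_LAST_REC type byte.
 *
 * lpfc_parse_fcoe_conf() validates the first two fields, then hands the
 * TLV area to lpfc_get_rec_conf23() for each record type it needs.
 */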