/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_hba *);

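/*
 * Handle expiration of a node's nodev timer: cancel the timer, flush
 * any outstanding FCP I/O to the target, log the timeout, and run the
 * node through the discovery state machine with NLP_EVT_DEVICE_RM.
 */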
static void
lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	uint8_t *name = (uint8_t *)&ndlp->nlp_portname;
	int warn_on = 0;

	spin_lock_irq(phba->host->host_lock);
	if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
		spin_unlock_irq(phba->host->host_lock);
		return;
	}

	/*
	 * If a discovery event readded nodev_timer after timer
	 * firing and before processing the timer, cancel the
	 * nlp_tmofunc.
	 */
	spin_unlock_irq(phba->host->host_lock);
	del_timer_sync(&ndlp->nlp_tmofunc);
	spin_lock_irq(phba->host->host_lock);

	ndlp->nlp_flag &= ~NLP_NODEV_TMO;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	spin_unlock_irq(phba->host->host_lock);

	if (warn_on) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0203 Nodev timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
				"%d:0204 Nodev timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	}

	lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
	return;
}

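/*
 * Drain the worker thread's event list, dispatching each queued event
 * (nodev timeout, ELS retry, online/offline/warm start/kill requests)
 * to its handler.
 */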
static void
lpfc_work_list_done(struct lpfc_hba * phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;

	spin_lock_irq(phba->host->host_lock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(phba->host->host_lock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_NODEV_TMO:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_process_nodev_timeout(phba, ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ONLINE:
			if (phba->hba_state < LPFC_LINK_DOWN)
				*(int *)(evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);
}

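/*
 * Main body of the worker thread: handle any latched host attention
 * conditions, run the deferred timeout handlers, re-enable ring
 * interrupts, and finally process the event list.
 */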
static void
lpfc_work_done(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	int i;
	uint32_t ha_copy;
	uint32_t control;
	uint32_t work_hba_events;

	spin_lock_irq(phba->host->host_lock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	work_hba_events = phba->work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	if (work_hba_events & WORKER_DISC_TMO)
		lpfc_disc_timeout_handler(phba);

	if (work_hba_events & WORKER_ELS_TMO)
		lpfc_els_timeout_handler(phba);

	if (work_hba_events & WORKER_MBOX_TMO)
		lpfc_mbox_timeout_handler(phba);

	if (work_hba_events & WORKER_FDMI_TMO)
		lpfc_fdmi_tmo_handler(phba);

	spin_lock_irq(phba->host->host_lock);
	phba->work_hba_events &= ~work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
		pring = &phba->sli.ring[i];
		if ((ha_copy & HA_RXATT)
		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
			if (pring->flag & LPFC_STOP_IOCB_MASK) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
			} else {
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(ha_copy &
								 HA_RXMASK));
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			}
			/*
			 * Turn on Ring interrupts
			 */
			spin_lock_irq(phba->host->host_lock);
			control = readl(phba->HCregaddr);
			control |= (HC_R0INT_ENA << i);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	lpfc_work_list_done(phba);
}

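/*
 * Condition checked by the worker thread's wait_event; returns nonzero
 * when there is host attention, a pending HBA event, queued work, or a
 * stop request.
 */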
static int
check_work_wait_done(struct lpfc_hba *phba) {

	spin_lock_irq(phba->host->host_lock);
	if (phba->work_ha ||
	    phba->work_hba_events ||
	    (!list_empty(&phba->work_list)) ||
	    kthread_should_stop()) {
		spin_unlock_irq(phba->host->host_lock);
		return 1;
	} else {
		spin_unlock_irq(phba->host->host_lock);
		return 0;
	}
}

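/*
 * Worker thread entry point; sleeps until check_work_wait_done() says
 * there is work, then calls lpfc_work_done() until asked to stop.
 */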
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));
		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

	}
	phba->work_wait = NULL;
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irq(phba->host->host_lock);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irq(phba->host->host_lock);

	return 1;
}

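/*
 * Bring the link logically down: unregister default RPIs, flush RSCN
 * and ELS activity, issue a LINK DOWN event to every node list, and
 * reset point-to-point state.
 */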
int
lpfc_linkdown(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct list_head *listp, *node_list[7];
	LPFC_MBOXQ_t *mb;
	int rc, i;

	psli = &phba->sli;

	/* sysfs or selective reset may call this routine to clean up */
	if (phba->hba_state >= LPFC_LINK_DOWN) {
		if (phba->hba_state == LPFC_LINK_DOWN)
			return 0;
	}
	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_DOWN;
	spin_unlock_irq(phba->host->host_lock);

	/* Clean up any firmware default rpi's */
	if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
		lpfc_unreg_did(phba, 0xffffffff, mb);
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(phba);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	/* Issue a LINK DOWN event to all nodes */
	node_list[0] = &phba->fc_npr_list;	/* MUST do this list first */
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_nlpunmap_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_reglogin_list;
	node_list[5] = &phba->fc_adisc_list;
	node_list[6] = &phba->fc_plogi_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {

			rc = lpfc_disc_state_machine(phba, ndlp, NULL,
						     NLP_EVT_DEVICE_RECOVERY);

			/* Check config parameter use-adisc or FCP-2 */
			if ((rc != NLP_STE_FREED_NODE) &&
			    (phba->cfg_use_adisc == 0) &&
			    !(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) {
				/* We know we will have to relogin, so
				 * unreglogin the rpi right now to fail
				 * any outstanding I/Os quickly.
				 */
				lpfc_unreg_rpi(phba, ndlp);
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->fc_flag & FC_PT2PT) {
		phba->fc_myDID = 0;
		if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox
			    (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(phba->host->host_lock);
	}
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LBIT;
	spin_unlock_irq(phba->host->host_lock);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(phba);

	/* Must process IOCBs on all rings to handle ABORTed I/Os */
	return 0;
}

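/*
 * Bring the link logically up: reset discovery flags and, when the
 * loop bit is set, clean up fabric nodes or unregister RPIs so stale
 * I/O fails quickly before rediscovery.
 */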
static int
lpfc_linkup(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct list_head *listp, *node_list[7];
	int i;

	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_UP;
	phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			   FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	phba->fc_flag |= FC_NDISC_ACTIVE;
	phba->fc_ns_retry = 0;
	spin_unlock_irq(phba->host->host_lock);

	node_list[0] = &phba->fc_plogi_list;
	node_list[1] = &phba->fc_adisc_list;
	node_list[2] = &phba->fc_reglogin_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_nlpunmap_list;
	node_list[5] = &phba->fc_nlpmap_list;
	node_list[6] = &phba->fc_npr_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
			if (phba->fc_flag & FC_LBIT) {
				if (ndlp->nlp_type & NLP_FABRIC) {
					/* On Linkup its safe to clean up the
					 * ndlp from Fabric connections.
					 */
					lpfc_nlp_list(phba, ndlp,
						      NLP_UNUSED_LIST);
				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
					/* Fail outstanding IO now since device
					 * is marked for PLOGI.
					 */
					lpfc_unreg_rpi(phba, ndlp);
				}
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	uint32_t control;

	psli = &phba->sli;
	mb = &pmb->mb;
	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->fc_flag & FC_ABORT_DISCOVERY)
		goto out;

	phba->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (phba->fc_npr_cnt) {
		lpfc_els_disc_plogi(phba);
	}

	if (!phba->num_disc_nodes) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
	}

	phba->hba_state = LPFC_HBA_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0225 Device Discovery completes\n",
			phba->brd_no);

	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_ABORT_DISCOVERY;
	if (phba->fc_flag & FC_ESTABLISH_LINK) {
		phba->fc_flag &= ~FC_ESTABLISH_LINK;
	}
	spin_unlock_irq(phba->host->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(phba);

	/* turn on Link Attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;
}

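/*
 * Completion handler for the CONFIG_LINK mailbox command issued while
 * in LPFC_LOCAL_CFG_LINK state; on success it either waits for FAN or
 * kicks off discovery with a FLOGI.
 */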
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    phba->fc_flag & FC_PUBLIC_LOOP &&
	    !(phba->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout. hba_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(phba);
		return;
	}

	/* Start discovery by sending a FLOGI. hba_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	phba->hba_state = LPFC_FLOGI;
	lpfc_set_disctmo(phba);
	lpfc_initial_flogi(phba);
	return;

out:
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"%d:0306 CONFIG_LINK mbxStatus error x%x "
			"HBA state x%x\n",
			phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);

	lpfc_linkdown(phba);

	phba->hba_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"%d:0200 CONFIG_LINK bad hba state x%x\n",
			phba->brd_no, phba->hba_state);

	lpfc_clear_la(phba, pmb);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
	rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}
	return;
}

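/*
 * Completion handler for the READ_SPARAM mailbox command; copies the
 * service parameters, node name, and port name into the HBA structure.
 */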
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x>\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		lpfc_linkdown(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	memcpy((uint8_t *) &phba->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	memcpy((uint8_t *) &phba->fc_nodename,
	       (uint8_t *) &phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy((uint8_t *) &phba->fc_portname,
	       (uint8_t *) &phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	if (phba->hba_state != LPFC_CLEAR_LA) {
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->ip_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	return;
}

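/*
 * Process a link-up attention event: record link speed and topology,
 * derive our DID (and optionally dump the ALPA map), then issue
 * READ_SPARAM and CONFIG_LINK mailbox commands.
 */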
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	int i;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(phba->host->host_lock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		/* Get Loop Map information */

		if (la->il)
			phba->fc_flag |= FC_LBIT;

		phba->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"%d:1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							phba->brd_no,
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		phba->fc_myDID = phba->fc_pref_DID;
		phba->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox);
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		lpfc_sli_issue_mbox(phba, sparam_mbox,
				    (MBX_NOWAIT | MBX_STOP_IOCB));
	}

	if (cfglink_mbox) {
		phba->hba_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		lpfc_sli_issue_mbox(phba, cfglink_mbox,
				    (MBX_NOWAIT | MBX_STOP_IOCB));
	}
}

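/*
 * Process a link-down attention event and re-enable link attention
 * interrupts; no CLEAR_LA is needed in this path.
 */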
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_LINK_EVENT,
				"%d:1307 READ_LA mbox error x%x state x%x\n",
				phba->brd_no,
				mb->mbxStatus, phba->hba_state);
		lpfc_mbx_issue_link_down(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(phba->host->host_lock);
	if (la->pb)
		phba->fc_flag |= FC_BYPASSED_MODE;
	else
		phba->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(phba->host->host_lock);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP) {
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
		}
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1303 Link Up Event x%x received "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				phba->hba_state, phba->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *ndlp_fdmi;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		mempool_free(ndlp, phba->nlp_mem_pool);

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		/* This NPort has been assigned an NPort_ID by the fabric as a
		 * result of the completed fabric login. Issue a State Change
		 * Registration (SCR) ELS request to the fabric controller
		 * (SCR_DID) so that this NPort gets RSCN events from the
		 * fabric.
		 */
		lpfc_issue_els_scr(phba, SCR_DID, 0);

		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
		if (!ndlp) {
			/* Allocate a new node instance. If the pool is empty,
			 * start the discovery process and skip the Nameserver
			 * login process. This is attempted again later on.
			 * Otherwise, issue a Port Login (PLOGI) to NameServer.
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
			if (!ndlp) {
				lpfc_disc_start(phba);
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
				mempool_free(pmb, phba->mbox_mem_pool);
				return;
			} else {
				lpfc_nlp_init(phba, ndlp, NameServer_DID);
				ndlp->nlp_type |= NLP_FABRIC;
			}
		}
		ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
		lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
		lpfc_issue_els_plogi(phba, NameServer_DID, 0);
		if (phba->cfg_fdmi_on) {
			ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
						  GFP_KERNEL);
			if (ndlp_fdmi) {
				lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
				ndlp_fdmi->nlp_type |= NLP_FABRIC;
				ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
				lpfc_issue_els_plogi(phba, FDMI_DID, 0);
			}
		}
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);

		/* RegLogin failed, so just use loop map to make discovery
		   list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	if (phba->hba_state < LPFC_HBA_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
	}

	phba->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		lpfc_disc_start(phba);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

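/*
 * Register a remote port with the FC transport and record its SCSI
 * target id, fixing up the rport roles from the node type.
 */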
static void
lpfc_register_remote_port(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
	if (!rport) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = ndlp;

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < MAX_FCP_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}

	return;
}

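/*
 * Unregister a remote port from the FC transport and break the
 * rport/node linkage.
 */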
static void
lpfc_unregister_remote_port(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_rport_data *rdata = rport->dd_data;

	ndlp->rport = NULL;
	rdata->pnode = NULL;
	fc_remote_port_delete(rport);

	return;
}

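/*
 * Move a node between the driver's nodelists, keeping the per-list
 * counters, nodev/delay timers, and FC transport registration state
 * consistent with the destination list.
 */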
int
lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
{
	enum { none, unmapped, mapped } rport_add = none, rport_del = none;
	struct lpfc_sli *psli;

	psli = &phba->sli;
	/* Sanity check to ensure we are not moving to / from the same list */
	if ((nlp->nlp_flag & NLP_LIST_MASK) == list)
		if (list != NLP_NO_LIST)
			return 0;

	spin_lock_irq(phba->host->host_lock);
	switch (nlp->nlp_flag & NLP_LIST_MASK) {
	case NLP_NO_LIST: /* Not on any list */
		break;
	case NLP_UNUSED_LIST:
		phba->fc_unused_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PLOGI_LIST:
		phba->fc_plogi_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_ADISC_LIST:
		phba->fc_adisc_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_REGLOGIN_LIST:
		phba->fc_reglogin_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PRLI_LIST:
		phba->fc_prli_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_UNMAPPED_LIST:
		phba->fc_unmap_cnt--;
		list_del(&nlp->nlp_listp);
		nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		nlp->nlp_type &= ~NLP_FC_NODE;
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = unmapped;
		break;
	case NLP_MAPPED_LIST:
		phba->fc_map_cnt--;
		list_del(&nlp->nlp_listp);
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = mapped;
		break;
	case NLP_NPR_LIST:
		phba->fc_npr_cnt--;
		list_del(&nlp->nlp_listp);
		/* Stop delay tmo if taking node off NPR list */
		if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
		    (list != NLP_NPR_LIST)) {
			spin_unlock_irq(phba->host->host_lock);
			lpfc_cancel_retry_delay_tmo(phba, nlp);
			spin_lock_irq(phba->host->host_lock);
		}
		break;
	}

	nlp->nlp_flag &= ~NLP_LIST_MASK;

	/* Add NPort <did> to <num> list */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_NODE,
			"%d:0904 Add NPort x%x to %d list Data: x%x\n",
			phba->brd_no,
			nlp->nlp_DID, list, nlp->nlp_flag);

	switch (list) {
	case NLP_NO_LIST: /* No list, just remove it */
		spin_unlock_irq(phba->host->host_lock);
		lpfc_nlp_remove(phba, nlp);
		spin_lock_irq(phba->host->host_lock);
		/* as node removed - stop further transport calls */
		rport_del = none;
		break;
	case NLP_UNUSED_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the unused list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
		phba->fc_unused_cnt++;
		break;
	case NLP_PLOGI_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the plogi list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
		phba->fc_plogi_cnt++;
		break;
	case NLP_ADISC_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the adisc list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
		phba->fc_adisc_cnt++;
		break;
	case NLP_REGLOGIN_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the reglogin list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
		phba->fc_reglogin_cnt++;
		break;
	case NLP_PRLI_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the prli list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
		phba->fc_prli_cnt++;
		break;
	case NLP_UNMAPPED_LIST:
		rport_add = unmapped;
		/* ensure all vestiges of "mapped" significance are gone */
		nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		nlp->nlp_flag |= list;
		/* Put it at the end of the unmap list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
		phba->fc_unmap_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_tmofunc);
			spin_lock_irq(phba->host->host_lock);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.
					      evt_listp);
		}
		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		nlp->nlp_type |= NLP_FC_NODE;
		break;
	case NLP_MAPPED_LIST:
		rport_add = mapped;
		nlp->nlp_flag |= list;
		/* Put it at the end of the map list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
		phba->fc_map_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_tmofunc);
			spin_lock_irq(phba->host->host_lock);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.
					      evt_listp);
		}
		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		break;
	case NLP_NPR_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the npr list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
		phba->fc_npr_cnt++;

		if (!(nlp->nlp_flag & NLP_NODEV_TMO))
			mod_timer(&nlp->nlp_tmofunc,
				  jiffies + HZ * phba->cfg_nodev_tmo);

		nlp->nlp_flag |= NLP_NODEV_TMO;
		nlp->nlp_flag &= ~NLP_RCV_PLOGI;
		break;
	case NLP_JUST_DQ:
		break;
	}

	spin_unlock_irq(phba->host->host_lock);

	/*
	 * We make all the calls into the transport after we have
	 * moved the node between lists. This so that we don't
	 * release the lock while in-between lists.
	 */

	/* Don't upcall midlayer if we're unloading */
	if (!(phba->fc_flag & FC_UNLOADING)) {
		/*
		 * We revalidate the rport pointer as the "add" function
		 * may have removed the remote port.
		 */
		if ((rport_del != none) && nlp->rport)
			lpfc_unregister_remote_port(phba, nlp);

		if (rport_add != none) {
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices
			 */
			if (!nlp->rport)
				lpfc_register_remote_port(phba, nlp);

			/*
			 * if we added to Mapped list, but the remote port
			 * registration failed or assigned a target id outside
			 * our presentable range - move the node to the
			 * Unmapped List
			 */
			if ((rport_add == mapped) &&
			    ((!nlp->rport) ||
			     (nlp->rport->scsi_target_id == -1) ||
			     (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
				nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
				spin_lock_irq(phba->host->host_lock);
				nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
				spin_unlock_irq(phba->host->host_lock);
				lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
			}
		}
	}
	return 0;
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_hba * phba)
{
	uint32_t tmo;

	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0247 Start Discovery Timer state x%x "
			"Data: x%x x%lx x%x x%x\n",
			phba->brd_no,
			phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_hba * phba)
{
	/* Turn off discovery timer if its running */
	if (phba->fc_flag & FC_DISC_TMO) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&phba->fc_disctmo);
		phba->work_hba_events &= ~WORKER_DISC_TMO;
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0248 Cancel Discovery Timer state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba * phba,
		    struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;

	psli = &phba->sli;
	icmd = &iocb->iocb;
	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->ip_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	psli = &phba->sli;

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(phba->host->host_lock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp
				     (phba, pring, iocb, ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_del(&iocb->list);
					pring->txq_cnt--;
					if (iocb->iocb_cmpl) {
						icmd = &iocb->iocb;
						icmd->ulpStatus =
						    IOSTAT_LOCAL_REJECT;
						icmd->un.ulpWord[4] =
						    IOERR_SLI_ABORTED;
						spin_unlock_irq(phba->host->
								host_lock);
						(iocb->iocb_cmpl) (phba,
								   iocb, iocb);
						spin_lock_irq(phba->host->
							      host_lock);
					} else
						lpfc_sli_release_iocbq(phba,
								       iocb);
				}
			}
			spin_unlock_irq(phba->host->host_lock);

		}
	}
	return 0;
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox
				(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		return 1;
	}
	return 0;
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t *mb;
	LPFC_MBOXQ_t *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0900 Cleanup node for NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);

	lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);

	/*
	 * if unloading the driver - just leave the remote port in place.
	 * The driver unload will force the attached devices to detach
	 * and flush cache's w/o generating flush errors.
	 */
	if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
		lpfc_unregister_remote_port(phba, ndlp);
		ndlp->nlp_sid = NLP_NO_SID;
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	lpfc_els_abort(phba, ndlp, 0);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_TMO | NLP_DELAY_TMO);
	spin_unlock_irq(phba->host->host_lock);
	del_timer_sync(&ndlp->nlp_tmofunc);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
		list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
		list_del_init(&ndlp->els_retry_evt.evt_listp);

	lpfc_unreg_rpi(phba, ndlp);

	return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
int
lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	if (ndlp->nlp_flag & NLP_NODEV_TMO) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag &= ~NLP_NODEV_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&ndlp->nlp_tmofunc);
		if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
			list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
	}

	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	}

	if (ndlp->nlp_disc_refcnt) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_REMOVE;
		spin_unlock_irq(phba->host->host_lock);
	} else {
		lpfc_freenode(phba, ndlp);
		mempool_free(ndlp, phba->nlp_mem_pool);
	}
	return 0;
}

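/*
 * Match a node against a DID, allowing for direct matches as well as
 * area/domain wildcard addressing.
 */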
static int
lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
{
	D_ID mydid;
	D_ID ndlpdid;
	D_ID matchdid;

	if (did == Bcast_DID)
		return 0;

	if (ndlp->nlp_DID == 0) {
		return 0;
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = phba->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}

/* Search for a nodelist entry on a specific list */
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t data1;

	spin_lock_irq(phba->host->host_lock);
	if (order & NLP_SEARCH_UNMAPPED) {
		list_for_each_entry_safe(ndlp, next_ndlp,
					 &phba->fc_nlpunmap_list, nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* FIND node DID unmapped */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0929 FIND node DID unmapped"
						" Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_MAPPED) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* FIND node DID mapped */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0930 FIND node DID mapped "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_PLOGI) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to PLOGI */
				/* FIND node DID plogi */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0908 FIND node DID plogi "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_ADISC) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to ADISC */
				/* FIND node DID adisc */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID adisc "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_REGLOGIN) {
		list_for_each_entry_safe(ndlp, next_ndlp,
					 &phba->fc_reglogin_list, nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to REGLOGIN */
				/* FIND node DID reglogin */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID reglogin"
						" Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_PRLI) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to PRLI */
				/* FIND node DID prli */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID prli "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_NPR) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to NPR */
				/* FIND node DID npr */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID npr "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_UNUSED) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to UNUSED */
				/* FIND node DID unused */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID unused "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}

	spin_unlock_irq(phba->host->host_lock);

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_NODE,
			"%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
			phba->brd_no, did, order);

	/* no match found */
	return NULL;
}

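/*
 * Find or allocate the node for a DID that is about to be discovered,
 * marking it for discovery on the NPR list when appropriate.
 */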
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t flg;

	ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
	if (!ndlp) {
		if ((phba->fc_flag & FC_RSCN_MODE) &&
		    ((lpfc_rscn_payload_check(phba, did) == 0)))
			return NULL;
		ndlp = (struct lpfc_nodelist *)
			mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(phba, ndlp, did);
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		return ndlp;
	}
	if (phba->fc_flag & FC_RSCN_MODE) {
		if (lpfc_rscn_payload_check(phba, did)) {
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			if (ndlp->nlp_flag & NLP_DELAY_TMO)
				lpfc_cancel_retry_delay_tmo(phba, ndlp);
		} else
			ndlp = NULL;
	} else {
		flg = ndlp->nlp_flag & NLP_LIST_MASK;
		if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
			return NULL;
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_hba * phba)
{
	int j;
	uint32_t alpa, index;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->fc_topology != TOPOLOGY_LOOP) {
		return;
	}

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];

			if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
				continue;
			}
			lpfc_setup_disc_node(phba, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (phba->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((phba->fc_myDID & 0xff) == alpa) {
				continue;
			}

			lpfc_setup_disc_node(phba, alpa);
		}
	}
	return;
}

/* Start Link up / RSCN discovery on NPR list */
void
lpfc_disc_start(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t did_changed, num_sent;
	uint32_t clear_la_pending;
	int rc;

	psli = &phba->sli;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->hba_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (phba->hba_state < LPFC_HBA_READY) {
		phba->hba_state = LPFC_DISC_AUTH;
	}
	lpfc_set_disctmo(phba);

	if (phba->fc_prevDID == phba->fc_myDID) {
		did_changed = 0;
	} else {
		did_changed = 1;
	}
	phba->fc_prevDID = phba->fc_myDID;
	phba->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0202 Start Discovery hba state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	/* If our did changed, we MUST do PLOGI */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				 nlp_listp) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			if (did_changed) {
				spin_lock_irq(phba->host->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(phba->host->host_lock);
			}
		}
	}

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(phba);

	if (num_sent)
		return;

	if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
		/* If we get here, there is nothing to ADISC */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			phba->hba_state = LPFC_CLEAR_LA;
			lpfc_clear_la(phba, mbox);
			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
			rc = lpfc_sli_issue_mbox(phba, mbox,
						 (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				lpfc_disc_flush_list(phba);
				psli->ring[(psli->ip_ring)].flag &=
				    ~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->fcp_ring)].flag &=
				    ~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->next_ring)].flag &=
				    ~LPFC_STOP_IOCB_EVENT;
				phba->hba_state = LPFC_HBA_READY;
			}
		}
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(phba);

		if (num_sent)
			return;

		if (phba->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((phba->fc_rscn_id_cnt == 0) &&
			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(phba->host->host_lock);
				phba->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(phba->host->host_lock);
			} else
				lpfc_els_handle_rscn(phba);
		}
	}
	return;
}

/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_del(&iocb->list);
			pring->txq_cnt--;
			lpfc_els_free_iocb(phba, iocb);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			iocb->iocb_cmpl = NULL;
			/* context2 = cmd, context2->next = rsp, context3 =
			   bpl */
			if (iocb->context2) {
				/* Free the response IOCB before handling the
				   command. */
				mp = (struct lpfc_dmabuf *) (iocb->context2);
				mp = list_get_first(&mp->list,
						    struct lpfc_dmabuf,
						    list);
				if (mp) {
					/* Delay before releasing rsp buffer to
					 * give UNREG mbox a chance to take
					 * effect.
					 */
					list_add(&mp->list,
						 &phba->freebufList);
				}
				lpfc_mbuf_free(phba,
					       ((struct lpfc_dmabuf *)
						iocb->context2)->virt,
					       ((struct lpfc_dmabuf *)
						iocb->context2)->phys);
				kfree(iocb->context2);
			}

			if (iocb->context3) {
				lpfc_mbuf_free(phba,
					       ((struct lpfc_dmabuf *)
						iocb->context3)->virt,
					       ((struct lpfc_dmabuf *)
						iocb->context3)->phys);
				kfree(iocb->context3);
			}
		}
	}

	return;
}

void
lpfc_disc_flush_list(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	if (phba->fc_plogi_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
					 nlp_listp) {
			lpfc_free_tx(phba, ndlp);
			lpfc_nlp_remove(phba, ndlp);
		}
	}
	if (phba->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
					 nlp_listp) {
			lpfc_free_tx(phba, ndlp);
			lpfc_nlp_remove(phba, ndlp);
		}
	}
	return;
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(phba->host->host_lock, flags);
	if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
		phba->work_hba_events |= WORKER_DISC_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);
	return;
}

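/*
 * Worker-thread side of the discovery timeout: take the recovery action
 * appropriate to the current hba_state (FAN wait, FLOGI, NameServer
 * login/query, node authentication, or CLEAR_LA wait).
 */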
static void
lpfc_disc_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
	int rc, clrlaerr = 0;

	if (unlikely(!phba))
		return;

	if (!(phba->fc_flag & FC_DISC_TMO))
		return;

	psli = &phba->sli;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);
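	/* The timeout is handled according to the discovery state the
	 * HBA was in when the timer fired; each case below restarts or
	 * unwinds that stage of discovery.
	 */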
	switch (phba->hba_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
		/* FAN timeout */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_DISCOVERY,
				"%d:0221 FAN timeout\n",
				phba->brd_no);

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
					 nlp_listp) {
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(phba, ndlp);
			}
		}
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		break;
	case LPFC_FLOGI:
	/* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0222 Initial FLOGI timeout\n",
				phba->brd_no);

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		break;
	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0223 Timeout while waiting for NameServer "
				"login\n", phba->brd_no);

		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
		if (ndlp)
			lpfc_nlp_remove(phba, ndlp);

		/* Start discovery */
		lpfc_disc_start(phba);
		break;
	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0224 NameServer Query timeout "
				"Data: x%x x%x\n",
				phba->brd_no,
				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
					 NameServer_DID);
		if (ndlp) {
			if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
				/* Try it one more time */
				rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
				if (rc == 0)
					break;
			}
			phba->fc_ns_retry = 0;
		}

		/* Nothing to authenticate, so CLEAR_LA right now */
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
			break;
		}
		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;
	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0227 Node Authentication timeout\n",
				phba->brd_no);
		lpfc_disc_flush_list(phba);
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}
		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
		}
		break;
	case LPFC_CLEAR_LA:
	/* CLEAR LA timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0228 CLEAR LA timeout\n",
				phba->brd_no);
		clrlaerr = 1;
		break;
	case LPFC_HBA_READY:
		if (phba->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_DISCOVERY,
					"%d:0231 RSCN timeout Data: x%x x%x\n",
					phba->brd_no,
					phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(phba);

			lpfc_els_flush_rscn(phba);
			lpfc_disc_flush_list(phba);
		}
		break;
	}
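	/* If CLEAR_LA could not be issued (or timed out), flush the
	 * discovery lists, re-enable IOCB processing on all rings, and
	 * force the HBA back to a ready state.
	 */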
	if (clrlaerr) {
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}

	return;
}
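/*
 * Nodev timer expiry. This runs in timer (interrupt) context, so it
 * only queues a LPFC_EVT_NODEV_TMO event for the worker thread; if the
 * event is already queued, it does nothing.
 */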
static void
lpfc_nodev_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag;
	struct lpfc_work_evt *evtp;

	ndlp = (struct lpfc_nodelist *)ptr;
	phba = ndlp->nlp_phba;
	evtp = &ndlp->nodev_timeout_evt;
	spin_lock_irqsave(phba->host->host_lock, iflag);

	if (!list_empty(&evtp->evt_listp)) {
		spin_unlock_irqrestore(phba->host->host_lock, iflag);
		return;
	}
	evtp->evt_arg1 = ndlp;
	evtp->evt = LPFC_EVT_NODEV_TMO;
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return;
}
/*
 * This routine handles processing a REG_LOGIN mailbox command for the
 * FDMI server upon completion. It is set up in the LPFC_MBOXQ as the
 * completion routine when the command is handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	/* Start issuing Fabric-Device Management Interface (FDMI)
	 * command to 0xfffffa (FDMI well known port)
	 */
	if (phba->cfg_fdmi_on == 1) {
		lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
	} else {
		/*
		 * Delay issuing FDMI command if fdmi-on=2
		 * (supporting RPA/hostname)
		 */
		mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
/*
 * This routine searches the ndlp lists for the given RPI. If the RPI
 * is found, it returns the node list pointer; otherwise it returns
 * NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
{
	struct lpfc_nodelist *ndlp;
	struct list_head *lists[] = {&phba->fc_nlpunmap_list,
				     &phba->fc_nlpmap_list,
				     &phba->fc_plogi_list,
				     &phba->fc_adisc_list,
				     &phba->fc_reglogin_list};
	int i;

	spin_lock_irq(phba->host->host_lock);
	for (i = 0; i < ARRAY_SIZE(lists); i++)
		list_for_each_entry(ndlp, lists[i], nlp_listp)
			if (ndlp->nlp_rpi == rpi) {
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
	spin_unlock_irq(phba->host->host_lock);
	return NULL;
}
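/*
 * A minimal (hypothetical) usage sketch: a completion routine that
 * carries only an RPI can recover its node with
 *
 *	ndlp = lpfc_findnode_rpi(phba, rpi);
 *	if (ndlp)
 *		... the node is still on one of the active lists ...
 *
 * Callers must not hold host_lock, since the routine acquires it.
 */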
/*
 * This routine searches the ndlp lists selected by the search order
 * for the given WWPN. If the WWPN is found, it returns the node list
 * pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
		   struct lpfc_name * wwpn)
{
	struct lpfc_nodelist *ndlp;
	struct list_head *lists[] = {&phba->fc_nlpunmap_list,
				     &phba->fc_nlpmap_list,
				     &phba->fc_npr_list,
				     &phba->fc_plogi_list,
				     &phba->fc_adisc_list,
				     &phba->fc_reglogin_list,
				     &phba->fc_prli_list};
	uint32_t search[] = {NLP_SEARCH_UNMAPPED,
			     NLP_SEARCH_MAPPED,
			     NLP_SEARCH_NPR,
			     NLP_SEARCH_PLOGI,
			     NLP_SEARCH_ADISC,
			     NLP_SEARCH_REGLOGIN,
			     NLP_SEARCH_PRLI};
	int i;

	spin_lock_irq(phba->host->host_lock);
	for (i = 0; i < ARRAY_SIZE(lists); i++) {
		if (!(order & search[i]))
			continue;
		list_for_each_entry(ndlp, lists[i], nlp_listp) {
			if (memcmp(&ndlp->nlp_portname, wwpn,
				   sizeof(struct lpfc_name)) == 0) {
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}
	spin_unlock_irq(phba->host->host_lock);
	return NULL;
}
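/*
 * Initialize a caller-allocated nodelist entry: zero the structure,
 * set up the nodev and ELS-retry timers, and record the DID, the
 * owning HBA, and an invalid SCSI id.
 */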
void
lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
	INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	init_timer(&ndlp->nlp_tmofunc);
	ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
	ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->nlp_phba = phba;
	ndlp->nlp_sid = NLP_NO_SID;
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	return;
}