/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Enterprise Fibre Channel Host Bus Adapters.                     *
 * Refer to the README file included with this package for         *
 * driver version and adapter support.                             *
 * Copyright (C) 2004 Emulex Corporation.                          *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of the GNU General Public License     *
 * as published by the Free Software Foundation; either version 2  *
 * of the License, or (at your option) any later version.          *
 *                                                                 *
 * This program is distributed in the hope that it will be useful, *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
 * GNU General Public License for more details, a copy of which    *
 * can be found in the file COPYING included with this package.    *
 *******************************************************************/
/*
 * $Id: lpfc_hbadisc.c 1.266 2005/04/13 11:59:06EDT sf_support Exp $
 */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
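/*
 * Note: the 126 values above are the valid arbitrated-loop physical
 * addresses (AL_PAs) defined by FC-AL, listed from 0xEF down to 0x01.
 * lpfc_disc_list_loopmap() below walks this table, front-to-back or
 * back-to-front depending on cfg_scan_down, whenever the firmware did
 * not supply a loop map.
 */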
static void lpfc_disc_timeout_handler(struct lpfc_hba *);
static void
lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	if (!(ndlp->nlp_type & NLP_FABRIC)) {
		/* Nodev timeout on NPort <nlp_DID> */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"%d:0203 Nodev timeout on NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);
	}

	spin_lock_irq(phba->host->host_lock);
	if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
		spin_unlock_irq(phba->host->host_lock);
		return;
	}

	ndlp->nlp_flag &= ~NLP_NODEV_TMO;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
	return;
}
static void
lpfc_work_list_done(struct lpfc_hba * phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;

	spin_lock_irq(phba->host->host_lock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(phba->host->host_lock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_NODEV_TMO:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_process_nodev_timeout(phba, ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ONLINE:
			*(int *)(evtp->evt_arg1) = lpfc_online(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			*(int *)(evtp->evt_arg1) = lpfc_offline(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);

}
static void
lpfc_work_done(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	int i;
	uint32_t ha_copy;
	uint32_t control;
	uint32_t work_hba_events;

	spin_lock_irq(phba->host->host_lock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	work_hba_events = phba->work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	if (work_hba_events & WORKER_DISC_TMO)
		lpfc_disc_timeout_handler(phba);

	if (work_hba_events & WORKER_ELS_TMO)
		lpfc_els_timeout_handler(phba);

	if (work_hba_events & WORKER_MBOX_TMO)
		lpfc_mbox_timeout_handler(phba);

	if (work_hba_events & WORKER_FDMI_TMO)
		lpfc_fdmi_tmo_handler(phba);

	spin_lock_irq(phba->host->host_lock);
	phba->work_hba_events &= ~work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
		pring = &phba->sli.ring[i];
		if ((ha_copy & HA_RXATT)
		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
			if (pring->flag & LPFC_STOP_IOCB_MASK) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
			} else {
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(ha_copy &
								 HA_RXMASK));
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			}
			/*
			 * Turn on Ring interrupts
			 */
			spin_lock_irq(phba->host->host_lock);
			control = readl(phba->HCregaddr);
			control |= (HC_R0INT_ENA << i);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	lpfc_work_list_done(phba);

}
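/*
 * Note on the ring loop above: the host attention word packs one 4-bit
 * attention nibble per ring, so lpfc_work_done() shifts ha_copy right
 * by four on each iteration and tests HA_RXATT against the low nibble
 * for the current ring.
 */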
static int
check_work_wait_done(struct lpfc_hba *phba) {

	spin_lock_irq(phba->host->host_lock);
	if (phba->work_ha ||
	    phba->work_hba_events ||
	    (!list_empty(&phba->work_list)) ||
	    kthread_should_stop()) {
		spin_unlock_irq(phba->host->host_lock);
		return 1;
	} else {
		spin_unlock_irq(phba->host->host_lock);
		return 0;
	}
}
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

	}
	phba->work_wait = NULL;
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_lock_irq(phba->host->host_lock);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irq(phba->host->host_lock);

	return 1;
}
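/*
 * Example of a hypothetical caller (not in this file): bring the HBA
 * online from process context and wait for the worker thread to run
 * lpfc_online() on our behalf, mirroring the LPFC_EVT_ONLINE handling
 * in lpfc_work_list_done() above:
 *
 *	struct completion online_compl;
 *	int status;
 *
 *	init_completion(&online_compl);
 *	lpfc_workq_post_event(phba, &status, &online_compl,
 *			      LPFC_EVT_ONLINE);
 *	wait_for_completion(&online_compl);
 */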
int
lpfc_linkdown(struct lpfc_hba * phba)
{
	struct lpfc_sli       *psli;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct list_head      *listp;
	struct list_head      *node_list[7];
	LPFC_MBOXQ_t          *mb;
	int                   rc, i;

	psli = &phba->sli;

	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_DOWN;
	spin_unlock_irq(phba->host->host_lock);

	/* Clean up any firmware default rpi's */
	if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
		lpfc_unreg_did(phba, 0xffffffff, mb);
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(phba);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	/* Issue a LINK DOWN event to all nodes */
	node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_nlpunmap_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_reglogin_list;
	node_list[5] = &phba->fc_adisc_list;
	node_list[6] = &phba->fc_plogi_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
			/* Fabric nodes are not handled thru state machine for
			   link down */
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Remove ALL Fabric nodes except Fabric_DID */
				if (ndlp->nlp_DID != Fabric_DID) {
					/* Take it off current list and free */
					lpfc_nlp_list(phba, ndlp,
						      NLP_NO_LIST);
				}
			} else {
				rc = lpfc_disc_state_machine(phba, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);

				/* Check config parameter use-adisc or FCP-2 */
				if ((rc != NLP_STE_FREED_NODE) &&
				    (phba->cfg_use_adisc == 0) &&
				    !(ndlp->nlp_fcp_info &
				      NLP_FCP_2_DEVICE)) {
					/* We know we will have to relogin, so
					 * unreglogin the rpi right now to fail
					 * any outstanding I/Os quickly.
					 */
					lpfc_unreg_rpi(phba, ndlp);
				}
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->fc_flag & FC_PT2PT) {
		phba->fc_myDID = 0;
		if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox
			    (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(phba->host->host_lock);
	}
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LBIT;
	spin_unlock_irq(phba->host->host_lock);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(phba);

	/* Must process IOCBs on all rings to handle ABORTed I/Os */
	return 0;
}
380 /* Must process IOCBs on all rings to handle ABORTed I/Os */
385 lpfc_linkup(struct lpfc_hba
* phba
)
387 struct lpfc_nodelist
*ndlp
, *next_ndlp
;
389 spin_lock_irq(phba
->host
->host_lock
);
390 phba
->hba_state
= LPFC_LINK_UP
;
391 phba
->fc_flag
&= ~(FC_PT2PT
| FC_PT2PT_PLOGI
| FC_ABORT_DISCOVERY
|
392 FC_RSCN_MODE
| FC_NLP_MORE
| FC_RSCN_DISCOVERY
);
393 phba
->fc_flag
|= FC_NDISC_ACTIVE
;
394 phba
->fc_ns_retry
= 0;
395 spin_unlock_irq(phba
->host
->host_lock
);
399 * Clean up old Fabric NLP_FABRIC logins.
401 list_for_each_entry_safe(ndlp
, next_ndlp
, &phba
->fc_nlpunmap_list
,
403 if (ndlp
->nlp_DID
== Fabric_DID
) {
404 /* Take it off current list and free */
405 lpfc_nlp_list(phba
, ndlp
, NLP_NO_LIST
);
409 /* free any ndlp's on unused list */
410 list_for_each_entry_safe(ndlp
, next_ndlp
, &phba
->fc_unused_list
,
412 lpfc_nlp_list(phba
, ndlp
, NLP_NO_LIST
);
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	uint32_t control;

	psli = &phba->sli;
	mb = &pmb->mb;
	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->fc_flag & FC_ABORT_DISCOVERY)
		goto out;

	phba->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (phba->fc_npr_cnt) {
		lpfc_els_disc_plogi(phba);
	}

	if (!phba->num_disc_nodes) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
	}

	phba->hba_state = LPFC_HBA_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0225 Device Discovery completes\n",
			phba->brd_no);

	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_ABORT_DISCOVERY;
	if (phba->fc_flag & FC_ESTABLISH_LINK) {
		phba->fc_flag &= ~FC_ESTABLISH_LINK;
	}
	spin_unlock_irq(phba->host->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(phba);

	/* turn on Link Attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;
}
static void
lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;

	psli = &phba->sli;
	mb = &pmb->mb;

	/* Check for error */
	if (mb->mbxStatus) {
		/* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0306 CONFIG_LINK mbxStatus error x%x "
				"HBA state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* If we are public loop and L bit was set */
			if ((phba->fc_flag & FC_PUBLIC_LOOP) &&
			    !(phba->fc_flag & FC_LBIT)) {
				/* Need to wait for FAN - use discovery timer
				 * for timeout. hba_state is identically
				 * LPFC_LOCAL_CFG_LINK while waiting for FAN
				 */
				lpfc_set_disctmo(phba);
				mempool_free(pmb, phba->mbox_mem_pool);
				return;
			}
		}

		/* Start discovery by sending a FLOGI hba_state is identically
		 * LPFC_FLOGI while waiting for FLOGI cmpl
		 */
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}
	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

out:
	/* CONFIG_LINK bad hba state <hba_state> */
	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_DISCOVERY,
			"%d:0200 CONFIG_LINK bad hba state x%x\n",
			phba->brd_no, phba->hba_state);

	if (phba->hba_state != LPFC_CLEAR_LA) {
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->ip_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	memcpy((uint8_t *) & phba->fc_nodename,
	       (uint8_t *) & phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy((uint8_t *) & phba->fc_portname,
	       (uint8_t *) & phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	if (phba->hba_state != LPFC_CLEAR_LA) {
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->ip_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	return;
}
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	int i;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(phba->host->host_lock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		/* Get Loop Map information */

		if (la->il)
			phba->fc_flag |= FC_LBIT;

		phba->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;
		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;

				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"%d:1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							phba->brd_no,
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		phba->fc_myDID = phba->fc_pref_DID;
		phba->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox);
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		lpfc_sli_issue_mbox(phba, sparam_mbox,
				    (MBX_NOWAIT | MBX_STOP_IOCB));
	}

	if (cfglink_mbox) {
		phba->hba_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
		lpfc_sli_issue_mbox(phba, cfglink_mbox,
				    (MBX_NOWAIT | MBX_STOP_IOCB));
	}
}
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);
}
/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_LINK_EVENT,
				"%d:1307 READ_LA mbox error x%x state x%x\n",
				phba->brd_no,
				mb->mbxStatus, phba->hba_state);
		lpfc_mbx_issue_link_down(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP) {
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
		}
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1303 Link Up Event x%x received "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				phba->hba_state, phba->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *ndlp_fdmi;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		mempool_free(ndlp, phba->nlp_mem_pool);

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	if (ndlp->nlp_rpi != 0)
		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
	ndlp->nlp_rpi = mb->un.varWords[0];
	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		/* This NPort has been assigned an NPort_ID by the fabric as a
		 * result of the completed fabric login. Issue a State Change
		 * Registration (SCR) ELS request to the fabric controller
		 * (SCR_DID) so that this NPort gets RSCN events from the
		 * fabric.
		 */
		lpfc_issue_els_scr(phba, SCR_DID, 0);

		/* Allocate a new node instance. If the pool is empty, just
		 * start the discovery process and skip the Nameserver login
		 * process. This is attempted again later on. Otherwise, issue
		 * a Port Login (PLOGI) to the NameServer
		 */
		if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
		    == 0) {
			lpfc_disc_start(phba);
		} else {
			lpfc_nlp_init(phba, ndlp, NameServer_DID);
			ndlp->nlp_type |= NLP_FABRIC;
			ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
			lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
			lpfc_issue_els_plogi(phba, ndlp, 0);
			if (phba->cfg_fdmi_on) {
				if ((ndlp_fdmi = mempool_alloc(
						       phba->nlp_mem_pool,
						       GFP_KERNEL))) {
					lpfc_nlp_init(phba, ndlp_fdmi,
						      FDMI_DID);
					ndlp_fdmi->nlp_type |= NLP_FABRIC;
					ndlp_fdmi->nlp_state =
						NLP_STE_PLOGI_ISSUE;
					lpfc_issue_els_plogi(phba, ndlp_fdmi,
							     0);
				}
			}
		}
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);

		/* RegLogin failed, so just use loop map to make discovery
		   list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	if (ndlp->nlp_rpi != 0)
		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
	ndlp->nlp_rpi = mb->un.varWords[0];
	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	if (phba->hba_state < LPFC_HBA_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
	}

	phba->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		lpfc_disc_start(phba);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static void
lpfc_register_remote_port(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	uint64_t wwn;

	/* Remote port has reappeared. Re-register w/ FC transport */
	memcpy(&wwn, &ndlp->nlp_nodename, sizeof(uint64_t));
	rport_ids.node_name = be64_to_cpu(wwn);
	memcpy(&wwn, &ndlp->nlp_portname, sizeof(uint64_t));
	rport_ids.port_name = be64_to_cpu(wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
	if (!rport) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < MAX_FCP_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	rdata = rport->dd_data;
	rdata->pnode = ndlp;

	return;
}
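/*
 * Note: nlp_nodename and nlp_portname hold the WWNs in big-endian wire
 * order, which is why lpfc_register_remote_port() above copies the raw
 * bytes into a u64 and applies be64_to_cpu() before handing the names
 * to the FC transport, which expects CPU byte order.
 */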
int
lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
{
	enum { none, unmapped, mapped } rport_add = none, rport_del = none;
	struct lpfc_sli *psli;

	psli = &phba->sli;

	/* Sanity check to ensure we are not moving to / from the same list */
	if ((nlp->nlp_flag & NLP_LIST_MASK) == list) {
		if (list != NLP_NO_LIST)
			return 0;
	}

	switch (nlp->nlp_flag & NLP_LIST_MASK) {
	case NLP_NO_LIST: /* Not on any list */
		break;
	case NLP_UNUSED_LIST:
		phba->fc_unused_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PLOGI_LIST:
		phba->fc_plogi_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_ADISC_LIST:
		phba->fc_adisc_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_REGLOGIN_LIST:
		phba->fc_reglogin_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PRLI_LIST:
		phba->fc_prli_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_UNMAPPED_LIST:
		phba->fc_unmap_cnt--;
		list_del(&nlp->nlp_listp);
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		nlp->nlp_type &= ~NLP_FC_NODE;
		spin_unlock_irq(phba->host->host_lock);
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = unmapped;
		break;
	case NLP_MAPPED_LIST:
		phba->fc_map_cnt--;
		list_del(&nlp->nlp_listp);
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = mapped;
		break;
	case NLP_NPR_LIST:
		phba->fc_npr_cnt--;
		list_del(&nlp->nlp_listp);
		/* Stop delay tmo if taking node off NPR list */
		if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
		    (list != NLP_NPR_LIST)) {
			spin_lock_irq(phba->host->host_lock);
			nlp->nlp_flag &= ~NLP_DELAY_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_delayfunc);
			if (!list_empty(&nlp->els_retry_evt.evt_listp))
				list_del_init(&nlp->els_retry_evt.evt_listp);
		}
		break;
	}

	spin_lock_irq(phba->host->host_lock);
	nlp->nlp_flag &= ~NLP_LIST_MASK;
	spin_unlock_irq(phba->host->host_lock);

	/* Add NPort <did> to <num> list */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_NODE,
			"%d:0904 Add NPort x%x to %d list Data: x%x\n",
			phba->brd_no,
			nlp->nlp_DID, list, nlp->nlp_flag);

	switch (list) {
	case NLP_NO_LIST: /* No list, just remove it */
		lpfc_nlp_remove(phba, nlp);
		break;
	case NLP_UNUSED_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the unused list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
		phba->fc_unused_cnt++;
		break;
	case NLP_PLOGI_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the plogi list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
		phba->fc_plogi_cnt++;
		break;
	case NLP_ADISC_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the adisc list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
		phba->fc_adisc_cnt++;
		break;
	case NLP_REGLOGIN_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the reglogin list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
		phba->fc_reglogin_cnt++;
		break;
	case NLP_PRLI_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the prli list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
		phba->fc_prli_cnt++;
		break;
	case NLP_UNMAPPED_LIST:
		rport_add = unmapped;
		/* ensure all vestiges of "mapped" significance are gone */
		nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the unmap list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
		phba->fc_unmap_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			spin_lock_irq(phba->host->host_lock);
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_tmofunc);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.
					      evt_listp);

		}
		nlp->nlp_type |= NLP_FC_NODE;
		break;
	case NLP_MAPPED_LIST:
		rport_add = mapped;
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the map list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
		phba->fc_map_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			del_timer_sync(&nlp->nlp_tmofunc);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.
					      evt_listp);

		}
		break;
	case NLP_NPR_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the npr list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
		phba->fc_npr_cnt++;

		/*
		 * Sanity check for Fabric entity.
		 * Set nodev_tmo for NPR state, for Fabric use 1 sec.
		 */
		if (nlp->nlp_type & NLP_FABRIC) {
			mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
		} else {
			mod_timer(&nlp->nlp_tmofunc,
				  jiffies + HZ * phba->cfg_nodev_tmo);
		}
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= NLP_NODEV_TMO;
		nlp->nlp_flag &= ~NLP_RCV_PLOGI;
		spin_unlock_irq(phba->host->host_lock);
		break;
	}

	/*
	 * We make all the calls into the transport after we have
	 * moved the node between lists. This so that we don't
	 * release the lock while in-between lists.
	 */

	/* Don't upcall midlayer if we're unloading */
	if (!(phba->fc_flag & FC_UNLOADING)) {
		/*
		 * We revalidate the rport pointer as the "add" function
		 * may have removed the remote port.
		 */
		if ((rport_del != none) && nlp->rport)
			fc_remote_port_block(nlp->rport);

		if (rport_add != none) {
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices
			 */
			if (!nlp->rport)
				lpfc_register_remote_port(phba, nlp);
			else
				fc_remote_port_unblock(nlp->rport);

			/*
			 * if we added to Mapped list, but the remote port
			 * registration failed or assigned a target id outside
			 * our presentable range - move the node to the
			 * Unmapped List
			 */
			if ((rport_add == mapped) &&
			    ((!nlp->rport) ||
			     (nlp->rport->scsi_target_id == -1) ||
			     (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
				nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
				spin_lock_irq(phba->host->host_lock);
				nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
				spin_unlock_irq(phba->host->host_lock);
				lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
			}
		}
	}
	return 0;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_hba * phba)
{
	uint32_t tmo;

	tmo = ((phba->fc_ratov * 2) + 1);

	mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0247 Start Discovery Timer state x%x "
			"Data: x%x x%lx x%x x%x\n",
			phba->brd_no,
			phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return;
}
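/*
 * Note: the timeout above is (2 * R_A_TOV) + 1 seconds; fc_ratov is
 * kept in seconds and the HZ multiply converts the sum to jiffies for
 * mod_timer().
 */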
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_hba * phba)
{
	/* Turn off discovery timer if its running */
	if (phba->fc_flag & FC_DISC_TMO) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&phba->fc_disctmo);
		phba->work_hba_events &= ~WORKER_DISC_TMO;
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0248 Cancel Discovery Timer state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba * phba,
		    struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;

	psli = &phba->sli;
	icmd = &iocb->iocb;
	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return 1;
		case CMD_ELS_REQUEST64_CR:
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->ip_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	psli = &phba->sli;

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(phba->host->host_lock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp
				     (phba, pring, iocb, ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_del(&iocb->list);
					pring->txq_cnt--;
					if (iocb->iocb_cmpl) {
						icmd = &iocb->iocb;
						icmd->ulpStatus =
							IOSTAT_LOCAL_REJECT;
						icmd->un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_unlock_irq(phba->host->
								host_lock);
						(iocb->iocb_cmpl) (phba,
								   iocb, iocb);
						spin_lock_irq(phba->host->
							      host_lock);
					} else {
						list_add_tail(&iocb->list,
							&phba->lpfc_iocb_list);
					}
				}
			}
			spin_unlock_irq(phba->host->host_lock);

		}
	}
	return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox
				(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		return 1;
	}
	return 0;
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t       *mb;
	LPFC_MBOXQ_t       *nextmb;
	struct lpfc_dmabuf *mp;
	struct fc_rport    *rport;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0900 Cleanup node for NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);

	lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);

	/*
	 * if unloading the driver - just leave the remote port in place.
	 * The driver unload will force the attached devices to detach
	 * and flush cache's w/o generating flush errors.
	 */
	if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
		rport = ndlp->rport;
		ndlp->rport = NULL;
		fc_remote_port_unblock(rport);
		fc_remote_port_delete(rport);
		ndlp->nlp_sid = NLP_NO_SID;
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	lpfc_els_abort(phba, ndlp, 0);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_TMO | NLP_DELAY_TMO);
	spin_unlock_irq(phba->host->host_lock);
	del_timer_sync(&ndlp->nlp_tmofunc);

	del_timer_sync(&ndlp->nlp_delayfunc);

	if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
		list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
		list_del_init(&ndlp->els_retry_evt.evt_listp);

	lpfc_unreg_rpi(phba, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
int
lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	if (ndlp->nlp_flag & NLP_NODEV_TMO) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag &= ~NLP_NODEV_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&ndlp->nlp_tmofunc);
		if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
			list_del_init(&ndlp->nodev_timeout_evt.evt_listp);

	}

	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag &= ~NLP_DELAY_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&ndlp->nlp_delayfunc);
		if (!list_empty(&ndlp->els_retry_evt.evt_listp))
			list_del_init(&ndlp->els_retry_evt.evt_listp);

	}

	if (ndlp->nlp_disc_refcnt) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_REMOVE;
		spin_unlock_irq(phba->host->host_lock);
	} else {
		lpfc_freenode(phba, ndlp);
		mempool_free(ndlp, phba->nlp_mem_pool);
	}
	return 0;
}
static int
lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
{
	D_ID mydid;
	D_ID ndlpdid;
	D_ID matchdid;

	if (did == Bcast_DID)
		return 0;

	if (ndlp->nlp_DID == 0) {
		return 0;
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = phba->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
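/*
 * Worked example for the cross-match above (illustrative values): with
 * phba->fc_myDID = 0x010100, a node probed on the local loop before
 * addressing completed may still carry nlp_DID = 0x0000E1 (domain and
 * area zero, AL_PA only). A search for did = 0x0101E1 then matches it:
 * the low id bytes agree (0xE1), our domain/area match the search DID,
 * and the ndlp DID's domain/area are zero with a nonzero id.
 */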
/* Search for a nodelist entry on a specific list */
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t data1;

	if (order & NLP_SEARCH_UNMAPPED) {
		list_for_each_entry_safe(ndlp, next_ndlp,
					 &phba->fc_nlpunmap_list, nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* FIND node DID unmapped */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0929 FIND node DID unmapped"
						" Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_MAPPED) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* FIND node DID mapped */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0930 FIND node DID mapped "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_PLOGI) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to PLOGI */
				/* FIND node DID plogi */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0908 FIND node DID plogi "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_ADISC) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to ADISC */
				/* FIND node DID adisc */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID adisc "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_REGLOGIN) {
		list_for_each_entry_safe(ndlp, next_ndlp,
					 &phba->fc_reglogin_list, nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to REGLOGIN */
				/* FIND node DID reglogin */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID reglogin"
						" Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_PRLI) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to PRLI */
				/* FIND node DID prli */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID prli "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_NPR) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to NPR */
				/* FIND node DID npr */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID npr "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				return ndlp;
			}
		}
	}

	if (order & NLP_SEARCH_UNUSED) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
					 nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				/* LOG change to UNUSED */
				/* FIND node DID unused */
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0931 FIND node DID unused "
						"Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				return ndlp;
			}
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_NODE,
			"%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
			phba->brd_no, did, order);

	/* no match found */
	return NULL;
}
1828 /* no match found */
1832 struct lpfc_nodelist
*
1833 lpfc_setup_disc_node(struct lpfc_hba
* phba
, uint32_t did
)
1835 struct lpfc_nodelist
*ndlp
;
1838 if ((ndlp
= lpfc_findnode_did(phba
, NLP_SEARCH_ALL
, did
)) == 0) {
1839 if ((phba
->hba_state
== LPFC_HBA_READY
) &&
1840 ((lpfc_rscn_payload_check(phba
, did
) == 0)))
1842 ndlp
= (struct lpfc_nodelist
*)
1843 mempool_alloc(phba
->nlp_mem_pool
, GFP_KERNEL
);
1846 lpfc_nlp_init(phba
, ndlp
, did
);
1847 ndlp
->nlp_state
= NLP_STE_NPR_NODE
;
1848 lpfc_nlp_list(phba
, ndlp
, NLP_NPR_LIST
);
1849 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
1852 if ((phba
->hba_state
== LPFC_HBA_READY
) &&
1853 (phba
->fc_flag
& FC_RSCN_MODE
)) {
1854 if (lpfc_rscn_payload_check(phba
, did
)) {
1855 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
1858 ndlp
->nlp_flag
&= ~NLP_NPR_2B_DISC
;
1863 flg
= ndlp
->nlp_flag
& NLP_LIST_MASK
;
1864 if ((flg
== NLP_ADISC_LIST
) ||
1865 (flg
== NLP_PLOGI_LIST
)) {
1868 ndlp
->nlp_state
= NLP_STE_NPR_NODE
;
1869 lpfc_nlp_list(phba
, ndlp
, NLP_NPR_LIST
);
1870 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_hba * phba)
{
	int j;
	uint32_t alpa, index;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->fc_topology != TOPOLOGY_LOOP) {
		return;
	}

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];

			if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
				continue;
			}
			lpfc_setup_disc_node(phba, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (phba->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((phba->fc_myDID & 0xff) == alpa) {
				continue;
			}
			lpfc_setup_disc_node(phba, alpa);
		}
	}
	return;
}
/* Start Link up / RSCN discovery on NPR list */
void
lpfc_disc_start(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t did_changed, num_sent;
	uint32_t clear_la_pending;
	int rc;

	psli = &phba->sli;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->hba_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (phba->hba_state < LPFC_HBA_READY) {
		phba->hba_state = LPFC_DISC_AUTH;
	}
	lpfc_set_disctmo(phba);

	if (phba->fc_prevDID == phba->fc_myDID) {
		did_changed = 0;
	} else {
		did_changed = 1;
	}
	phba->fc_prevDID = phba->fc_myDID;
	phba->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0202 Start Discovery hba state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	/* If our did changed, we MUST do PLOGI */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				 nlp_listp) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			if (did_changed) {
				spin_lock_irq(phba->host->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(phba->host->host_lock);
			}
		}
	}

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(phba);

	if (num_sent)
		return;

	if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
		/* If we get here, there is nothing to ADISC */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			phba->hba_state = LPFC_CLEAR_LA;
			lpfc_clear_la(phba, mbox);
			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
			rc = lpfc_sli_issue_mbox(phba, mbox,
						 (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				lpfc_disc_flush_list(phba);
				psli->ring[(psli->ip_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->fcp_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->next_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				phba->hba_state = LPFC_HBA_READY;
			}
		}
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(phba);

		if (num_sent)
			return;

		if (phba->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((phba->fc_rscn_id_cnt == 0) &&
			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(phba->host->host_lock);
				phba->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(phba->host->host_lock);
			} else {
				lpfc_els_handle_rscn(phba);
			}
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues for the
 * ELS ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_del(&iocb->list);
			pring->txq_cnt--;
			lpfc_els_free_iocb(phba, iocb);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			iocb->iocb_cmpl = NULL;
			/* context2 = cmd, context2->next = rsp, context3 =
			   bpl */
			if (iocb->context2) {
				/* Free the response IOCB before handling the
				   command. */

				mp = (struct lpfc_dmabuf *) (iocb->context2);
				mp = list_get_first(&mp->list,
						    struct lpfc_dmabuf,
						    list);
				if (mp) {
					/* Delay before releasing rsp buffer to
					 * give UNREG mbox a chance to take
					 * effect.
					 */
					list_add(&mp->list,
						 &phba->freebufList);
				}
				lpfc_mbuf_free(phba,
					       ((struct lpfc_dmabuf *)
						iocb->context2)->virt,
					       ((struct lpfc_dmabuf *)
						iocb->context2)->phys);
				kfree(iocb->context2);
			}

			if (iocb->context3) {
				lpfc_mbuf_free(phba,
					       ((struct lpfc_dmabuf *)
						iocb->context3)->virt,
					       ((struct lpfc_dmabuf *)
						iocb->context3)->phys);
				kfree(iocb->context3);
			}
		}
	}

	return;
}
void
lpfc_disc_flush_list(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	if (phba->fc_plogi_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
					 nlp_listp) {
			lpfc_free_tx(phba, ndlp);
			lpfc_nlp_remove(phba, ndlp);
		}
	}
	if (phba->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
					 nlp_listp) {
			lpfc_free_tx(phba, ndlp);
			lpfc_nlp_remove(phba, ndlp);
		}
	}
	return;
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(phba->host->host_lock, flags);
	if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
		phba->work_hba_events |= WORKER_DISC_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);
	return;
}
static void
lpfc_disc_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp;
	LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
	int rc, clrlaerr = 0;

	if (unlikely(!phba))
		return;

	if (!(phba->fc_flag & FC_DISC_TMO))
		return;

	psli = &phba->sli;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	switch (phba->hba_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
		/* FAN timeout */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_DISCOVERY,
				"%d:0221 FAN timeout\n",
				phba->brd_no);

		/* Forget about FAN, Start discovery by sending a FLOGI
		 * hba_state is identically LPFC_FLOGI while waiting for FLOGI
		 * cmpl
		 */
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		break;

	case LPFC_FLOGI:
	/* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0222 Initial FLOGI timeout\n",
				phba->brd_no);

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0223 Timeout while waiting for NameServer "
				"login\n", phba->brd_no);

		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
		if (ndlp)
			lpfc_nlp_remove(phba, ndlp);
		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0224 NameServer Query timeout "
				"Data: x%x x%x\n",
				phba->brd_no,
				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
					 NameServer_DID);
		if (ndlp) {
			if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
				/* Try it one more time */
				rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
				if (rc == 0)
					break;
			}
			phba->fc_ns_retry = 0;
		}

		/* Nothing to authenticate, so CLEAR_LA right now */
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
			break;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0227 Node Authentication timeout\n",
				phba->brd_no);
		lpfc_disc_flush_list(phba);
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}
		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
		}
		break;

	case LPFC_CLEAR_LA:
	/* CLEAR LA timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0228 CLEAR LA timeout\n",
				phba->brd_no);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		if (phba->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_DISCOVERY,
					"%d:0231 RSCN timeout Data: x%x x%x\n",
					phba->brd_no,
					phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(phba);

			lpfc_els_flush_rscn(phba);
			lpfc_disc_flush_list(phba);
		}
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}

	return;
}
static void
lpfc_nodev_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag;
	struct lpfc_work_evt *evtp;

	ndlp = (struct lpfc_nodelist *)ptr;
	phba = ndlp->nlp_phba;
	evtp = &ndlp->nodev_timeout_evt;
	spin_lock_irqsave(phba->host->host_lock, iflag);

	if (!list_empty(&evtp->evt_listp)) {
		spin_unlock_irqrestore(phba->host->host_lock, iflag);
		return;
	}
	evtp->evt_arg1 = ndlp;
	evtp->evt = LPFC_EVT_NODEV_TMO;
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return;
}
/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	if (ndlp->nlp_rpi != 0)
		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
	ndlp->nlp_rpi = mb->un.varWords[0];
	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	/* Start issuing Fabric-Device Management Interface (FDMI)
	 * command to 0xfffffa (FDMI well known port)
	 */
	if (phba->cfg_fdmi_on == 1) {
		lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
	} else {
		/*
		 * Delay issuing FDMI command if fdmi-on=2
		 * (supporting RPA/hostname)
		 */
		mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
/*
 * This routine looks up the ndlp hash
 * table for the given RPI. If the rpi is found,
 * it returns the node list pointer;
 * otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
{
	struct lpfc_nodelist *ret;

	ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
	while ((ret != 0) && (ret->nlp_rpi != rpi)) {
		ret = ret->nlp_rpi_hash_next;
	}
	return ret;
}
/*
 * This routine looks up the ndlp hash table for the
 * given RPI. If the rpi is found, it returns the node list
 * pointer after deleting the entry from the hash table;
 * otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
{
	struct lpfc_nodelist *ret, *temp;

	ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
	if (ret == 0)
		return NULL;

	if (ret->nlp_rpi == rpi) {
		phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
			ret->nlp_rpi_hash_next;
		ret->nlp_rpi_hash_next = NULL;
		return ret;
	}

	while ((ret->nlp_rpi_hash_next != 0) &&
	       (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
		ret = ret->nlp_rpi_hash_next;
	}

	if (ret->nlp_rpi_hash_next != 0) {
		temp = ret->nlp_rpi_hash_next;
		ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
		temp->nlp_rpi_hash_next = NULL;
		return temp;
	} else {
		return NULL;
	}
}
/*
 * This routine adds the node list entry to the
 * ndlp hash table.
 */
void
lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
		 uint16_t rpi)
{
	uint32_t index;

	index = LPFC_RPI_HASH_FUNC(rpi);
	ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[index];
	phba->fc_nlplookup[index] = ndlp;
	return;
}
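/*
 * Typical pairing, as in the REG_LOGIN completion handlers above: any
 * stale RPI is first unhooked with lpfc_findnode_remove_rpi(), then the
 * firmware-assigned RPI from the mailbox is installed here. Collisions
 * chain through nlp_rpi_hash_next at the head of each bucket:
 *
 *	if (ndlp->nlp_rpi != 0)
 *		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
 *	ndlp->nlp_rpi = mb->un.varWords[0];
 *	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
 */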
void
lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
	INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	init_timer(&ndlp->nlp_tmofunc);
	ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
	ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->nlp_phba = phba;
	ndlp->nlp_sid = NLP_NO_SID;
	return;
}