/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
/************************************************************************/
/* lpfc_config_port_prep                                                */
/* This routine will do LPFC initialization prior to the               */
/* CONFIG_PORT mailbox command. This will be initialized               */
/* as a SLI layer callback routine.                                    */
/* This routine returns 0 on success or -ERESTART if it wants          */
/* the SLI layer to reset the HBA and try again. Any                   */
/* other return value indicates an error.                              */
/************************************************************************/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof (phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof (phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
		return -EINVAL;

	/* Save information as VPD data */
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
	if (!pmb->context2)
		goto out_free_mbox;
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_context2;

	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_context2:
	kfree(pmb->context2);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
/************************************************************************/
/* lpfc_config_port_post                                                */
/* This routine will do LPFC initialization after the                   */
/* CONFIG_PORT mailbox command. This will be initialized                */
/* as a SLI layer callback routine.                                     */
/* This routine returns 0 on success. Any other return value            */
/* indicates an error.                                                  */
/************************************************************************/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->mb;

	/* Get login parameters for NID.  */
	lpfc_read_sparam(phba, pmb, 0);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free( pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			mb->un.varRdConfig.max_xri + 1;

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
				"1302 Invalid speed for this board: "
				"Reset link speed to auto: x%x\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ring 0 till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/* Enable appropriate host interrupts */
	spin_lock_irq(&phba->hbalock);
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Setup the ring 0 (els) timeout handler
	 */
	timeout = phba->fc_ratov << 1;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	pmb->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	lpfc_set_loopback_flag(phba);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */

	return 0;
}
/************************************************************************/
/* lpfc_hba_down_prep                                                   */
/* This routine will do LPFC uninitialization before the                */
/* HBA is reset when bringing down the SLI Layer. This will be          */
/* initialized as a SLI layer callback routine.                         */
/* This routine returns 0 on success. Any other return value            */
/* indicates an error.                                                  */
/************************************************************************/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	/* Disable interrupts */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	lpfc_cleanup_discovery_resources(phba->pport);
	return 0;
}
/************************************************************************/
/* lpfc_hba_down_post                                                   */
/* This routine will do uninitialization after the HBA is reset         */
/* when bringing down the SLI Layer.                                    */
/* This routine returns 0 on success. Any other return value            */
/* indicates an error.                                                  */
/************************************************************************/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		lpfc_sli_abort_iocb_ring(phba, pring);
	}

	return 0;
}
/* HBA heart beat timeout handler */
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->work_port_events & WORKER_HB_TMO))
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (phba->work_wait)
		wake_up(phba->work_wait);
	return;
}
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;
	struct lpfc_sli *psli = &phba->sli;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);
	/* If the timer is already canceled do nothing */
	if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		return;
	}

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (!phba->hb_outstanding) {
		pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
		if (!pmboxq) {
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
			return;
		}

		lpfc_heart_beat(phba, pmboxq);
		pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
		pmboxq->vport = phba->pport;
		retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

		if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
			mempool_free(pmboxq, phba->mbox_mem_pool);
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
			return;
		}
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		phba->hb_outstanding = 1;
		return;
	} else {
		/*
		 * If heart beat timeout called with hb_outstanding set we
		 * need to take the HBA offline.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0459 Adapter heartbeat failure, taking "
				"this port offline.\n");

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_unblock_mgmt_io(phba);
		phba->link_state = LPFC_HBA_ERROR;
		lpfc_hba_down_post(phba);
	}
}
/************************************************************************/
/* lpfc_handle_eratt                                                    */
/* This routine will handle processing a Host Attention                 */
/* Error Status event. This will be initialized                         */
/* as a SLI layer callback routine.                                     */
/************************************************************************/
static void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	struct lpfc_vport **vports;
	uint32_t event_data;
	struct Scsi_Host  *shost;
	int i;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway. */
	if (pci_channel_offline(phba->pcidev))
		return;

	if (phba->work_hs & HS_FFER6 ||
	    phba->work_hs & HS_FFER5) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0;
			     i < LPFC_MAX_VPORTS && vports[i] != NULL;
			     i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				spin_lock_irq(shost->host_lock);
				vports[i]->fc_flag |= FC_ESTABLISH_LINK;
				spin_unlock_irq(shost->host_lock);
			}
		lpfc_destroy_vport_work_array(vports);
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggled erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_unblock_mgmt_io(phba);
		phba->link_state = LPFC_HBA_ERROR;
		lpfc_hba_down_post(phba);
	}
}
/************************************************************************/
/* lpfc_handle_latt                                                     */
/* This routine will handle processing a Host Attention                 */
/* Link Status event. This will be initialized                          */
/* as a SLI layer callback routine.                                     */
/************************************************************************/
static void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		goto lpfc_handle_latt_err_exit;

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp)
		goto lpfc_handle_latt_free_pmb;

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt)
		goto lpfc_handle_latt_free_mp;

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED)
		goto lpfc_handle_latt_free_mbuf;

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;

	/* The other case is an error from issue_mbox */
	if (rc == -ENOMEM)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0300 READ_LA: no buffers\n");

	return;
}
/************************************************************************/
/* lpfc_parse_vpd                                                       */
/* This routine will parse the VPD data                                 */
/************************************************************************/
static int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	struct {
		char * name;
		int    max_speed;
		char * bus;
	} m = {"<Unknown>", 0, ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed,
			"PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed,
			"PCIe"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
		break;
	default:
		m = (typeof(m)){ NULL };
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	if (descp && descp[0] == '\0')
		snprintf(descp, 255,
			 "Emulex %s %dGb %s Fibre Channel Adapter",
			 m.name, m.max_speed, m.bus);
}
/**************************************************/
/*   lpfc_post_buffer                             */
/*                                                */
/*   This routine will post count buffers to the  */
/*   ring with the QUE_RING_BUF_CN command. This  */
/*   allows 3 buffers / command to be posted.     */
/*   Returns the number of buffers NOT posted.    */
/**************************************************/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
		 int type)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for  command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						    &mp1->phys);
		if (mp1 == 0 || mp1->virt == 0) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (mp2 == 0 || mp2->virt == 0) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
/************************************************************************/
/* lpfc_post_rcv_buf                                                    */
/* This routine post initial rcv buffers to the configured rings        */
/************************************************************************/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}
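
/* S(N,V) rotates the 32-bit value V left by N bits.  It is the rotate
 * primitive used by the SHA-1 style hash below (lpfc_sha_init /
 * lpfc_sha_iterate) that lpfc_hba_init uses to transform the adapter's
 * challenge key material.
 */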
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/************************************************************************/
/* lpfc_sha_init                                                        */
/************************************************************************/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}
/************************************************************************/
/* lpfc_sha_iterate                                                     */
/************************************************************************/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;

	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1, HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
			 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

	return;
}
/************************************************************************/
/* lpfc_challenge_key                                                   */
/************************************************************************/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}
/************************************************************************/
/* lpfc_hba_init                                                        */
/************************************************************************/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	/* clean up phba - lpfc specific */
	lpfc_can_disctmo(vport);
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
		lpfc_nlp_put(ndlp);
	return;
}
static void
lpfc_establish_link_tmo(unsigned long ptr)
{
	struct lpfc_hba   *phba = (struct lpfc_hba *) ptr;
	struct lpfc_vport **vports;
	unsigned long iflag;
	int i;

	/* Re-establishing Link, timer expired */
	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1300 Re-establishing Link, timer expired "
			"Data: x%x x%x\n",
			phba->pport->fc_flag, phba->pport->port_state);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irqsave(shost->host_lock, iflag);
			vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
			spin_unlock_irqrestore(shost->host_lock, iflag);
		}
	lpfc_destroy_vport_work_array(vports);
}
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	lpfc_can_disctmo(vport);
	return;
}

/* Stop all timers owned by the HBA and by its physical port. */
void
lpfc_stop_phba_timers(struct lpfc_hba *phba)
{
	del_timer_sync(&phba->fcp_poll_timer);
	del_timer_sync(&phba->fc_estabtmo);
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	phba->hb_outstanding = 0;
	del_timer_sync(&phba->hb_tmofunc);
	return;
}
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_vport **vports;
	int i;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (lpfc_sli_hba_setup(phba)) {	/* Initialize the HBA */
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
void
lpfc_offline_prep(struct lpfc_hba * phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist  *ndlp, *next_ndlp;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba);

	lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
		if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
			lpfc_unreg_rpi(vport, ndlp);

	lpfc_sli_flush_mbox_queue(phba);
}
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop all timers associated with this hba */
	lpfc_stop_phba_timers(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup.  The HBA is offline
	   now.  */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			lpfc_cleanup(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(vports);
}
/******************************************************************************
* Function name: lpfc_scsi_free
*
* Description: Called from lpfc_pci_remove_one free internal driver resources
*
******************************************************************************/
static int
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(&phba->hbalock);
	/* Release all the lpfc_scsi_bufs maintained by this host. */
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(&phba->hbalock);
	return 0;
}
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;
	int error = 0;

	if (dev != &phba->pcidev->dev)
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	else
		shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;

	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	spin_lock_init(&vport->work_port_lock);

	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->fc_fdmitmo);
	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
	vport->fc_fdmitmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	error = scsi_add_host(shost, dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}
static void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	kfree(vport->vname);

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}
int
lpfc_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}
/*
 * Note: there is no scan_start function as adapter initialization
 * will have asynchronously kicked off the link initialization.
 */

int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_vport *vport = NULL;
	struct lpfc_hba   *phba;
	struct lpfc_sli   *psli;
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
	struct Scsi_Host  *shost = NULL;
	void *ptr;
	unsigned long bar0map_len, bar2map_len;
	int error = -ENODEV;
	int i, hbq_count;
	uint16_t iotag;

	if (pci_enable_device(pdev))
		goto out;
	if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;

	phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
	if (!phba)
		goto out_release_regions;

	spin_lock_init(&phba->hbalock);

	phba->pcidev = pdev;

	/* Assign an unused board number */
	if ((phba->brd_no = lpfc_get_instance()) < 0)
		goto out_free_phba;

	INIT_LIST_HEAD(&phba->port_list);
	/*
	 * Get all the module params for configuring this host and then
	 * establish the host.
	 */
	lpfc_get_cfgparam(phba);
	phba->max_vpi = LPFC_MAX_VPI;

	/* Initialize timers used by driver */
	init_timer(&phba->fc_estabtmo);
	phba->fc_estabtmo.function = lpfc_establish_link_tmo;
	phba->fc_estabtmo.data = (unsigned long)phba;

	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
		if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
			goto out_idr_remove;

	/*
	 * Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
	bar0map_len        = pci_resource_len(phba->pcidev, 0);

	phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
	bar2map_len        = pci_resource_len(phba->pcidev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out_idr_remove;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
					  &phba->slim2p_mapping, GFP_KERNEL);
	if (!phba->slim2p)
		goto out_iounmap;

	memset(phba->slim2p, 0, SLI2_SLIM_SIZE);

	phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer  = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	/* Initialize the SLI Layer to run with lpfc HBAs. */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	error = lpfc_mem_alloc(phba);
	if (error)
		goto out_free_hbqslimp;

	/* Initialize and populate the iocb list per host.  */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__FUNCTION__, i, LPFC_IOCB_LIST_CNT);
			error = -ENOMEM;
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree (iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
			       "Unloading driver.\n",
				__FUNCTION__);
			error = -ENOMEM;
			goto out_free_iocbq;
		}

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Initialize HBA structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	INIT_LIST_HEAD(&phba->work_list);
	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
				       "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		goto out_free_iocbq;
	}

	/* Initialize the list of scsi buffers used by driver for scsi IO. */
	spin_lock_init(&phba->scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);

	/* Initialize list of fabric iocbs */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		goto out_kthread_stop;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;
	lpfc_debugfs_initialize(vport);

	pci_set_drvdata(pdev, shost);

	if (phba->cfg_use_msi) {
		error = pci_enable_msi(phba->pcidev);
		if (!error)
			phba->using_msi = 1;
		else
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0452 Enable MSI failed, continuing "
					"with IRQ\n");
	}

	error =	request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
			    LPFC_DRIVER_NAME, phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0451 Enable interrupt handler failed\n");
		goto out_disable_msi;
	}

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	if (lpfc_alloc_sysfs_attr(vport))
		goto out_free_irq;

	if (lpfc_sli_hba_setup(phba))
		goto out_remove_device;

	/*
	 * hba setup may have changed the hba_queue_depth so we need to adjust
	 * the value of can_queue.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	scsi_scan_host(shost);

	return 0;

out_remove_device:
	lpfc_free_sysfs_attr(vport);
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);
out_free_irq:
	lpfc_stop_phba_timers(phba);
	phba->pport->work_port_events = 0;
	free_irq(phba->pcidev->irq, phba);
out_disable_msi:
	if (phba->using_msi)
		pci_disable_msi(phba->pcidev);
	destroy_port(vport);
out_kthread_stop:
	kthread_stop(phba->worker_thread);
out_free_iocbq:
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
						&phba->lpfc_iocb_list, list) {
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	lpfc_mem_free(phba);
out_free_hbqslimp:
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
			  phba->hbqslimp.phys);
out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
			  phba->slim2p_mapping);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out_idr_remove:
	idr_remove(&lpfc_hba_index, phba->brd_no);
out_free_phba:
	kfree(phba);
out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	pci_set_drvdata(pdev, NULL);
	if (shost)
		scsi_host_put(shost);

	return error;
}
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	kfree(vport->vname);
	lpfc_free_sysfs_attr(vport);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	/*
	 * Bring down the SLI Layer. This step disable all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */
	lpfc_sli_hba_down(phba);
	lpfc_sli_brdrestart(phba);

	lpfc_stop_phba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);
	lpfc_cleanup(vport);

	kthread_stop(phba->worker_thread);

	/* Release the irq reservation */
	free_irq(phba->pcidev->irq, phba);
	if (phba->using_msi)
		pci_disable_msi(phba->pcidev);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
			  phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p, phba->slim2p_mapping);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	idr_remove(&lpfc_hba_index, phba->brd_no);

	kfree(phba);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * lpfc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);
	/*
	 * There may be I/Os dropped by the firmware.
	 * Error iocb (I/O) on txcmplq and let the SCSI layer
	 * retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/* Release the irq reservation */
	free_irq(phba->pcidev->irq, phba);
	if (phba->using_msi)
		pci_disable_msi(phba->pcidev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * lpfc_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_bars(pdev, bars)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	/* Re-establishing Link */
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag |= FC_ESTABLISH_LINK;
	spin_unlock_irq(shost->host_lock);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	if (lpfc_online(phba) == 0) {
		mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
	}
}
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.err_handler    = &lpfc_err_handler,
};
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
	if (!lpfc_transport_template || !lpfc_vport_transport_template)
		return -ENOMEM;
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);