/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
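
/*
 * Note: lpfc_hba_index hands out the small integer board number used in
 * log messages and the worker-thread name; each probed HBA takes one via
 * idr_get_new() in lpfc_pci_probe_one() and releases it again in
 * lpfc_pci_remove_one().
 */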
/************************************************************************
 * lpfc_config_port_prep
 *
 * This routine will do LPFC initialization prior to the CONFIG_PORT
 * mailbox command. This will be initialized as a SLI layer callback
 * routine.
 * This routine returns 0 on success or -ERESTART if it wants the SLI
 * layer to reset the HBA and try again. Any other return value
 * indicates an error.
 ************************************************************************/
int
lpfc_config_port_prep(struct lpfc_hba * phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->mb;
	phba->hba_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"%d:0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof (mb->un.varRDnvp.nodename));
	}

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n",
				phba->brd_no);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* Save information as VPD data */
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof (phba->RandomData));

	/* Get adapter VPD information */
	pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
	if (!pmb->context2)
		goto out_free_mbox;
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_context2;

	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"%d:0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_context2:
	kfree(pmb->context2);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
/************************************************************************
 * lpfc_config_port_post
 *
 * This routine will do LPFC initialization after the CONFIG_PORT
 * mailbox command. This will be initialized as a SLI layer callback
 * routine.
 * This routine returns 0 on success. Any other return value indicates
 * an error.
 ************************************************************************/
int
lpfc_config_port_post(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j, rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->mb;

	lpfc_config_link(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0447 Adapter failed init, mbxCmd x%x "
				"CONFIG_LINK mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Get login parameters for NID.  */
	lpfc_read_sparam(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
	memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &phba->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			mb->un.varRdConfig.max_xri + 1;

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
				"%d:1302 Invalid speed for this board: "
				"Reset link speed to auto: x%x\n",
				phba->brd_no,
				phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->hba_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ring 0 till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	lpfc_post_rcv_buf(phba);

	/* Enable appropriate host interrupts */
	spin_lock_irq(phba->host->host_lock);
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	/*
	 * Setup the ring 0 (els) timeout handler
	 */
	timeout = phba->fc_ratov << 1;
	phba->els_tmofunc.expires = jiffies + HZ * timeout;
	add_timer(&phba->els_tmofunc);

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->hba_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */

	return 0;
}
static int
lpfc_discovery_wait(struct lpfc_hba *phba)
{
	int i = 0;

	while ((phba->hba_state != LPFC_HBA_READY) ||
	       (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
	       ((phba->fc_map_cnt == 0) && (i < 2)) ||
	       (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
		/* Check every second for 30 retries. */
		i++;
		if (i > 30)
			break;

		if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
			/* The link is down.  Set linkdown timeout */
			break;
		}

		/* Delay for 1 second to give discovery time to complete. */
		msleep(1000);
	}

	return 0;
}
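
/*
 * The loop above simply polls once per second (for roughly 30 seconds)
 * until discovery has quiesced: the HBA reports READY, no discovery nodes
 * or PRLIs are outstanding, and no mailbox command is active.  It gives up
 * early if the link has stayed down for half of that interval.
 */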
/************************************************************************
 * lpfc_hba_down_prep
 *
 * This routine will do LPFC uninitialization before the HBA is reset
 * when bringing down the SLI Layer. This will be initialized as a SLI
 * layer callback routine.
 * This routine returns 0 on success. Any other return value indicates
 * an error.
 ************************************************************************/
int
lpfc_hba_down_prep(struct lpfc_hba * phba)
{
	/* Disable interrupts */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Cleanup potential discovery resources */
	lpfc_els_flush_rscn(phba);
	lpfc_els_flush_cmd(phba);
	lpfc_disc_flush_list(phba);

	return 0;
}
/************************************************************************
 * lpfc_hba_down_post
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 * This routine returns 0 on success. Any other return value indicates
 * an error.
 ************************************************************************/
int
lpfc_hba_down_post(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	int i;

	/* Cleanup preposted buffers on the ELS ring */
	pring = &psli->ring[LPFC_ELS_RING];
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		list_del(&mp->list);
		pring->postbufq_cnt--;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		lpfc_sli_abort_iocb_ring(phba, pring);
	}

	return 0;
}
/************************************************************************
 * lpfc_handle_eratt
 *
 * This routine will handle processing a Host Attention Error Status
 * event. This will be initialized as a SLI layer callback routine.
 ************************************************************************/
static void
lpfc_handle_eratt(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway. */
	if (pci_channel_offline(phba->pcidev))
		return;

	if (phba->work_hs & HS_FFER6 ||
	    phba->work_hs & HS_FFER5) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"%d:1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag |= FC_ESTABLISH_LINK;
		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
			return;
		}
	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6.  Do not call the
		 * offline twice.  This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		fc_host_post_vendor_event(phba->host, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		phba->hba_state = LPFC_HBA_ERROR;
		lpfc_hba_down_post(phba);
	}
}
/************************************************************************
 * lpfc_handle_latt
 *
 * This routine will handle processing a Host Attention Link Status
 * event. This will be initialized as a SLI layer callback routine.
 ************************************************************************/
static void
lpfc_handle_latt(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = -ENOMEM;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		goto lpfc_handle_latt_err_exit;

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp)
		goto lpfc_handle_latt_free_pmb;

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt)
		goto lpfc_handle_latt_free_mp;

	rc = -EIO;

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED)
		goto lpfc_handle_latt_free_mbuf;

	/* Clear Link Attention in HA REG */
	spin_lock_irq(phba->host->host_lock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;

lpfc_handle_latt_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	phba->hba_state = LPFC_HBA_ERROR;

	/* The other case is an error from issue_mbox */
	if (rc == -ENOMEM)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"%d:0300 READ_LA: no buffers\n",
				phba->brd_no);

	return;
}
/************************************************************************
 * lpfc_parse_vpd
 *
 * This routine will parse the VPD data.
 ************************************************************************/
static int
lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;

			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}
static void
lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	struct {
		char * name;
		int    max_speed;
		char * bus;
	} m = {"<Unknown>", 0, ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed,
			"PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed,
			"PCIe"};
		break;
	default:
		m = (typeof(m)){ NULL };
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	if (descp && descp[0] == '\0')
		snprintf(descp, 255,
			 "Emulex %s %dGb %s Fibre Channel Adapter",
			 m.name, m.max_speed, m.bus);
}
/**************************************************
 * lpfc_post_buffer
 *
 * This routine will post count buffers to the ring with the
 * QUE_RING_BUF_CN command. This allows 3 buffers / command to be
 * posted. Returns the number of buffers NOT posted.
 **************************************************/
static int
lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
		 int type)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		spin_lock_irq(phba->host->host_lock);
		iocb = lpfc_sli_get_iocbq(phba);
		spin_unlock_irq(phba->host->host_lock);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						    &mp1->phys);
		if (mp1 == 0 || mp1->virt == 0) {
			kfree(mp1);
			spin_lock_irq(phba->host->host_lock);
			lpfc_sli_release_iocbq(phba, iocb);
			spin_unlock_irq(phba->host->host_lock);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (mp2 == 0 || mp2->virt == 0) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				spin_lock_irq(phba->host->host_lock);
				lpfc_sli_release_iocbq(phba, iocb);
				spin_unlock_irq(phba->host->host_lock);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		spin_lock_irq(phba->host->host_lock);
		if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			spin_unlock_irq(phba->host->host_lock);
			return cnt;
		}
		spin_unlock_irq(phba->host->host_lock);
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
/************************************************************************
 * lpfc_post_rcv_buf
 *
 * This routine posts initial rcv buffers to the configured rings.
 ************************************************************************/
static int
lpfc_post_rcv_buf(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/************************************************************************
 * lpfc_sha_init
 ************************************************************************/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}
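
/*
 * The five constants written by lpfc_sha_init() are the standard SHA-1
 * initial hash values (H0-H4), and the S(N,V) macro above is a 32-bit
 * left rotate; together they drive the round function in
 * lpfc_sha_iterate() below.
 */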
/************************************************************************
 * lpfc_sha_iterate
 ************************************************************************/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;

	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);

	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}
/************************************************************************
 * lpfc_challenge_key
 ************************************************************************/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}
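
/*
 * lpfc_challenge_key() simply XORs one word of the adapter's random
 * challenge into the corresponding word of the hash working area;
 * lpfc_hba_init() below calls it once per word of RandomData.
 */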
/************************************************************************
 * lpfc_hba_init
 ************************************************************************/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = phba->wwnn;

	HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	memset(HashWorking, 0, (80 * sizeof(uint32_t)));
	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);

	kfree(HashWorking);
}
int
lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	/* clean up phba - lpfc specific */
	lpfc_can_disctmo(phba);
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	INIT_LIST_HEAD(&phba->fc_nlpmap_list);
	INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
	INIT_LIST_HEAD(&phba->fc_unused_list);
	INIT_LIST_HEAD(&phba->fc_plogi_list);
	INIT_LIST_HEAD(&phba->fc_adisc_list);
	INIT_LIST_HEAD(&phba->fc_reglogin_list);
	INIT_LIST_HEAD(&phba->fc_prli_list);
	INIT_LIST_HEAD(&phba->fc_npr_list);

	phba->fc_map_cnt      = 0;
	phba->fc_unmap_cnt    = 0;
	phba->fc_plogi_cnt    = 0;
	phba->fc_adisc_cnt    = 0;
	phba->fc_reglogin_cnt = 0;
	phba->fc_prli_cnt     = 0;
	phba->fc_npr_cnt      = 0;
	phba->fc_unused_cnt   = 0;

	return 0;
}
static void
lpfc_establish_link_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;

	/* Re-establishing Link, timer expired */
	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"%d:1300 Re-establishing Link, timer expired "
			"Data: x%x x%x\n",
			phba->brd_no, phba->fc_flag, phba->hba_state);
	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag &= ~FC_ESTABLISH_LINK;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
static int
lpfc_stop_timer(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Instead of a timer, this has been converted to a
	 * deferred processing list.
	 */
	while (!list_empty(&phba->freebufList)) {

		struct lpfc_dmabuf *mp = NULL;

		list_remove_head((&phba->freebufList), mp,
				 struct lpfc_dmabuf, list);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	del_timer_sync(&phba->fcp_poll_timer);
	del_timer_sync(&phba->fc_estabtmo);
	del_timer_sync(&phba->fc_disctmo);
	del_timer_sync(&phba->fc_fdmitmo);
	del_timer_sync(&phba->els_tmofunc);
	del_timer_sync(&psli->mbox_tmo);

	return 0;
}
int
lpfc_online(struct lpfc_hba * phba)
{
	if (!phba)
		return 0;

	if (!(phba->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"%d:0458 Bring Adapter online\n",
			phba->brd_no);

	if (!lpfc_sli_queue_setup(phba))
		return 1;

	if (lpfc_sli_hba_setup(phba))	/* Initialize the HBA */
		return 1;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_OFFLINE_MODE;
	spin_unlock_irq(phba->host->host_lock);

	return 0;
}
int
lpfc_offline(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	unsigned long iflag;
	int i;
	int cnt = 0;

	if (!phba)
		return 0;

	if (phba->fc_flag & FC_OFFLINE_MODE)
		return 0;

	psli = &phba->sli;

	lpfc_linkdown(phba);
	lpfc_sli_flush_mbox_queue(phba);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* The linkdown event takes 30 seconds to timeout. */
		while (pring->txcmplq_cnt) {
			msleep(10);
			if (cnt++ > 3000) {
				lpfc_printf_log(phba,
					KERN_WARNING, LOG_INIT,
					"%d:0466 Outstanding IO when "
					"bringing Adapter offline\n",
					phba->brd_no);
				break;
			}
		}
	}

	/* stop all timers associated with this hba */
	lpfc_stop_timer(phba);
	phba->work_hba_events = 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"%d:0460 Bring Adapter offline\n",
			phba->brd_no);

	/* Bring down the SLI Layer and cleanup.  The HBA is offline
	   now.  */
	lpfc_sli_hba_down(phba);
	lpfc_cleanup(phba, 1);
	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag |= FC_OFFLINE_MODE;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);

	return 0;
}
/******************************************************************************
 * Function name: lpfc_scsi_free
 *
 * Description: Called from lpfc_pci_remove_one to free internal driver
 *              resources.
 ******************************************************************************/
static int
lpfc_scsi_free(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(phba->host->host_lock);
	/* Release all the lpfc_scsi_bufs maintained by this host. */
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(phba->host->host_lock);

	return 0;
}
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct Scsi_Host *host;
	struct lpfc_hba  *phba;
	struct lpfc_sli  *psli;
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
	unsigned long bar0map_len, bar2map_len;
	int error = -ENODEV, retval;
	int i;
	uint16_t iotag;

	if (pci_enable_device(pdev))
		goto out;
	if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;

	host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
	if (!host)
		goto out_release_regions;

	phba = (struct lpfc_hba *)host->hostdata;
	memset(phba, 0, sizeof (struct lpfc_hba));
	phba->host = host;

	phba->fc_flag |= FC_LOADING;
	phba->pcidev = pdev;

	/* Assign an unused board number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		goto out_put_host;

	error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
	if (error)
		goto out_put_host;

	host->unique_id = phba->brd_no;
	INIT_LIST_HEAD(&phba->ctrspbuflist);
	INIT_LIST_HEAD(&phba->rnidrspbuflist);
	INIT_LIST_HEAD(&phba->freebufList);

	/* Initialize timers used by driver */
	init_timer(&phba->fc_estabtmo);
	phba->fc_estabtmo.function = lpfc_establish_link_tmo;
	phba->fc_estabtmo.data = (unsigned long)phba;
	init_timer(&phba->fc_disctmo);
	phba->fc_disctmo.function = lpfc_disc_timeout;
	phba->fc_disctmo.data = (unsigned long)phba;

	init_timer(&phba->fc_fdmitmo);
	phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
	phba->fc_fdmitmo.data = (unsigned long)phba;
	init_timer(&phba->els_tmofunc);
	phba->els_tmofunc.function = lpfc_els_timeout;
	phba->els_tmofunc.data = (unsigned long)phba;
	psli = &phba->sli;
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long)phba;

	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long)phba;

	/*
	 * Get all the module params for configuring this host and then
	 * establish the host parameters.
	 */
	lpfc_get_cfgparam(phba);

	host->max_id = LPFC_MAX_TARGET;
	host->max_lun = phba->cfg_max_luns;

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&phba->fc_nlpmap_list);
	INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
	INIT_LIST_HEAD(&phba->fc_unused_list);
	INIT_LIST_HEAD(&phba->fc_plogi_list);
	INIT_LIST_HEAD(&phba->fc_adisc_list);
	INIT_LIST_HEAD(&phba->fc_reglogin_list);
	INIT_LIST_HEAD(&phba->fc_prli_list);
	INIT_LIST_HEAD(&phba->fc_npr_list);

	pci_set_master(pdev);
	retval = pci_set_mwi(pdev);
	if (retval)
		dev_printk(KERN_WARNING, &pdev->dev,
			   "Warning: pci_set_mwi returned %d\n", retval);

	if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
		if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
			goto out_idr_remove;

	/*
	 * Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
	bar0map_len = pci_resource_len(phba->pcidev, 0);

	phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
	bar2map_len = pci_resource_len(phba->pcidev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out_idr_remove;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
					  &phba->slim2p_mapping, GFP_KERNEL);
	if (!phba->slim2p)
		goto out_iounmap;

	memset(phba->slim2p, 0, SLI2_SLIM_SIZE);

	/* Initialize the SLI Layer to run with lpfc HBAs. */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	error = lpfc_mem_alloc(phba);
	if (error)
		goto out_free_slim;

	/* Initialize and populate the iocb list per host.  */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
		iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__FUNCTION__, i, LPFC_IOCB_LIST_CNT);
			error = -ENOMEM;
			goto out_free_iocbq;
		}

		memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
			       "Unloading driver.\n",
				__FUNCTION__);
			error = -ENOMEM;
			goto out_free_iocbq;
		}

		spin_lock_irq(phba->host->host_lock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(phba->host->host_lock);
	}

	/* Initialize HBA structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	INIT_LIST_HEAD(&phba->work_list);
	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
				       "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		goto out_free_iocbq;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	host->can_queue = phba->cfg_hba_queue_depth - 10;

	/* Tell the midlayer we support 16 byte commands */
	host->max_cmd_len = 16;

	/* Initialize the list of scsi buffers used by driver for scsi IO. */
	spin_lock_init(&phba->scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);

	host->transportt = lpfc_transport_template;
	pci_set_drvdata(pdev, host);
	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto out_kthread_stop;

	error = lpfc_alloc_sysfs_attr(phba);
	if (error)
		goto out_remove_host;

	if (phba->cfg_use_msi) {
		error = pci_enable_msi(phba->pcidev);
		if (error)
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
					"Enable MSI failed, continuing with "
					"IRQ\n", phba->brd_no);
	}

	error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
			    LPFC_DRIVER_NAME, phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0451 Enable interrupt handler failed\n",
				phba->brd_no);
		goto out_free_sysfs_attr;
	}
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	error = lpfc_sli_hba_setup(phba);
	if (error)
		goto out_free_irq;

	/*
	 * hba setup may have changed the hba_queue_depth so we need to adjust
	 * the value of can_queue.
	 */
	host->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_discovery_wait(phba);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(phba->host->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(phba->host->host_lock);
	}

	/*
	 * set fixed host attributes
	 * Must be done after lpfc_sli_hba_setup()
	 */

	fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
	fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
	fc_host_supported_classes(host) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(host), 0,
		sizeof(fc_host_supported_fc4s(host)));
	fc_host_supported_fc4s(host)[2] = 1;
	fc_host_supported_fc4s(host)[7] = 1;

	lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));

	fc_host_supported_speeds(host) = 0;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(host) =
		((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(host), 0,
		sizeof(fc_host_active_fc4s(host)));
	fc_host_active_fc4s(host)[2] = 1;
	fc_host_active_fc4s(host)[7] = 1;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LOADING;
	spin_unlock_irq(phba->host->host_lock);
	return 0;

out_free_irq:
	lpfc_stop_timer(phba);
	phba->work_hba_events = 0;
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(phba);
out_remove_host:
	fc_remove_host(phba->host);
	scsi_remove_host(phba->host);
out_kthread_stop:
	kthread_stop(phba->worker_thread);
out_free_iocbq:
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		spin_lock_irq(phba->host->host_lock);
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
		spin_unlock_irq(phba->host->host_lock);
	}
	lpfc_mem_free(phba);
out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
			  phba->slim2p_mapping);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out_idr_remove:
	idr_remove(&lpfc_hba_index, phba->brd_no);
out_put_host:
	scsi_host_put(host);
out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	pci_set_drvdata(pdev, NULL);
	return error;
}
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba  *phba = (struct lpfc_hba *)host->hostdata;
	unsigned long iflag;

	lpfc_free_sysfs_attr(phba);

	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag |= FC_UNLOADING;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);

	fc_remove_host(phba->host);
	scsi_remove_host(phba->host);

	kthread_stop(phba->worker_thread);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */
	lpfc_sli_hba_down(phba);
	lpfc_sli_brdrestart(phba);

	/* Release the irq reservation */
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);

	lpfc_cleanup(phba, 0);
	lpfc_stop_timer(phba);
	phba->work_hba_events = 0;

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free(phba);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p, phba->slim2p_mapping);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	pci_release_regions(phba->pcidev);
	pci_disable_device(phba->pcidev);

	idr_remove(&lpfc_hba_index, phba->brd_no);
	scsi_host_put(phba->host);

	pci_set_drvdata(pdev, NULL);
}
/**
 * lpfc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	if (state == pci_channel_io_perm_failure) {
		lpfc_pci_remove_one(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_disable_device(pdev);
	/*
	 * There may be I/Os dropped by the firmware.
	 * Error iocb (I/O) on txcmplq and let the SCSI layer
	 * retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * lpfc_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_bars(pdev, bars)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	/* Re-establishing Link */
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_ESTABLISH_LINK;
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(phba->host->host_lock);

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;

	if (lpfc_online(phba) == 0) {
		mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
	}
}
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.err_handler	= &lpfc_err_handler,
};
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	lpfc_transport_template =
		fc_attach_transport(&lpfc_transport_functions);
	if (!lpfc_transport_template)
		return -ENOMEM;
	error = pci_register_driver(&lpfc_driver);
	if (error)
		fc_release_transport(lpfc_transport_template);

	return error;
}

static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);