/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}
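		/*
		 * For LightPulse (LC) HBAs, the byte-swapped license string
		 * above is placed in the READ_NVPARM reserved words below;
		 * the reply supplies the adapter WWNN and WWPN that are
		 * copied out after the mailbox command completes.
		 */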
		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
		if (offset + i > DMP_VPD_SIZE)
			i = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset, i);
		offset += i;
	} while (offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *                         cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
410 lpfc_config_port_post(struct lpfc_hba
*phba
)
412 struct lpfc_vport
*vport
= phba
->pport
;
413 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
416 struct lpfc_dmabuf
*mp
;
417 struct lpfc_sli
*psli
= &phba
->sli
;
418 uint32_t status
, timeout
;
422 spin_lock_irq(&phba
->hbalock
);
424 * If the Config port completed correctly the HBA is not
425 * over heated any more.
427 if (phba
->over_temp_state
== HBA_OVER_TEMP
)
428 phba
->over_temp_state
= HBA_NORMAL_TEMP
;
429 spin_unlock_irq(&phba
->hbalock
);
431 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
433 phba
->link_state
= LPFC_HBA_ERROR
;
438 /* Get login parameters for NID. */
439 rc
= lpfc_read_sparam(phba
, pmb
, 0);
441 mempool_free(pmb
, phba
->mbox_mem_pool
);
446 if (lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
) != MBX_SUCCESS
) {
447 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
448 "0448 Adapter failed init, mbxCmd x%x "
449 "READ_SPARM mbxStatus x%x\n",
450 mb
->mbxCommand
, mb
->mbxStatus
);
451 phba
->link_state
= LPFC_HBA_ERROR
;
452 mp
= (struct lpfc_dmabuf
*)pmb
->ctx_buf
;
453 mempool_free(pmb
, phba
->mbox_mem_pool
);
454 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
459 mp
= (struct lpfc_dmabuf
*)pmb
->ctx_buf
;
461 memcpy(&vport
->fc_sparam
, mp
->virt
, sizeof (struct serv_parm
));
462 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
465 lpfc_update_vport_wwn(vport
);
467 /* Update the fc_host data structures with new wwn. */
468 fc_host_node_name(shost
) = wwn_to_u64(vport
->fc_nodename
.u
.wwn
);
469 fc_host_port_name(shost
) = wwn_to_u64(vport
->fc_portname
.u
.wwn
);
470 fc_host_max_npiv_vports(shost
) = phba
->max_vpi
;
472 /* If no serial number in VPD data, use low 6 bytes of WWNN */
473 /* This should be consolidated into parse_vpd ? - mr */
474 if (phba
->SerialNumber
[0] == 0) {
477 outptr
= &vport
->fc_nodename
.u
.s
.IEEE
[0];
478 for (i
= 0; i
< 12; i
++) {
480 j
= ((status
& 0xf0) >> 4);
482 phba
->SerialNumber
[i
] =
483 (char)((uint8_t) 0x30 + (uint8_t) j
);
485 phba
->SerialNumber
[i
] =
486 (char)((uint8_t) 0x61 + (uint8_t) (j
- 10));
490 phba
->SerialNumber
[i
] =
491 (char)((uint8_t) 0x30 + (uint8_t) j
);
493 phba
->SerialNumber
[i
] =
494 (char)((uint8_t) 0x61 + (uint8_t) (j
- 10));
498 lpfc_read_config(phba
, pmb
);
500 if (lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
) != MBX_SUCCESS
) {
501 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
502 "0453 Adapter failed to init, mbxCmd x%x "
503 "READ_CONFIG, mbxStatus x%x\n",
504 mb
->mbxCommand
, mb
->mbxStatus
);
505 phba
->link_state
= LPFC_HBA_ERROR
;
506 mempool_free( pmb
, phba
->mbox_mem_pool
);
510 /* Check if the port is disabled */
511 lpfc_sli_read_link_ste(phba
);
513 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
514 if (phba
->cfg_hba_queue_depth
> mb
->un
.varRdConfig
.max_xri
) {
515 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
516 "3359 HBA queue depth changed from %d to %d\n",
517 phba
->cfg_hba_queue_depth
,
518 mb
->un
.varRdConfig
.max_xri
);
519 phba
->cfg_hba_queue_depth
= mb
->un
.varRdConfig
.max_xri
;
522 phba
->lmt
= mb
->un
.varRdConfig
.lmt
;
524 /* Get the default values for Model Name and Description */
525 lpfc_get_hba_model_desc(phba
, phba
->ModelName
, phba
->ModelDesc
);
527 phba
->link_state
= LPFC_LINK_DOWN
;
529 /* Only process IOCBs on ELS ring till hba_state is READY */
530 if (psli
->sli3_ring
[LPFC_EXTRA_RING
].sli
.sli3
.cmdringaddr
)
531 psli
->sli3_ring
[LPFC_EXTRA_RING
].flag
|= LPFC_STOP_IOCB_EVENT
;
532 if (psli
->sli3_ring
[LPFC_FCP_RING
].sli
.sli3
.cmdringaddr
)
533 psli
->sli3_ring
[LPFC_FCP_RING
].flag
|= LPFC_STOP_IOCB_EVENT
;
535 /* Post receive buffers for desired rings */
536 if (phba
->sli_rev
!= 3)
537 lpfc_post_rcv_buf(phba
);
540 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
542 if (phba
->intr_type
== MSIX
) {
543 rc
= lpfc_config_msi(phba
, pmb
);
545 mempool_free(pmb
, phba
->mbox_mem_pool
);
548 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
549 if (rc
!= MBX_SUCCESS
) {
550 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
551 "0352 Config MSI mailbox command "
552 "failed, mbxCmd x%x, mbxStatus x%x\n",
553 pmb
->u
.mb
.mbxCommand
,
554 pmb
->u
.mb
.mbxStatus
);
555 mempool_free(pmb
, phba
->mbox_mem_pool
);
560 spin_lock_irq(&phba
->hbalock
);
561 /* Initialize ERATT handling flag */
562 phba
->hba_flag
&= ~HBA_ERATT_HANDLED
;
564 /* Enable appropriate host interrupts */
565 if (lpfc_readl(phba
->HCregaddr
, &status
)) {
566 spin_unlock_irq(&phba
->hbalock
);
569 status
|= HC_MBINT_ENA
| HC_ERINT_ENA
| HC_LAINT_ENA
;
570 if (psli
->num_rings
> 0)
571 status
|= HC_R0INT_ENA
;
572 if (psli
->num_rings
> 1)
573 status
|= HC_R1INT_ENA
;
574 if (psli
->num_rings
> 2)
575 status
|= HC_R2INT_ENA
;
576 if (psli
->num_rings
> 3)
577 status
|= HC_R3INT_ENA
;
579 if ((phba
->cfg_poll
& ENABLE_FCP_RING_POLLING
) &&
580 (phba
->cfg_poll
& DISABLE_FCP_RING_INT
))
581 status
&= ~(HC_R0INT_ENA
);
583 writel(status
, phba
->HCregaddr
);
584 readl(phba
->HCregaddr
); /* flush */
585 spin_unlock_irq(&phba
->hbalock
);
587 /* Set up ring-0 (ELS) timer */
588 timeout
= phba
->fc_ratov
* 2;
589 mod_timer(&vport
->els_tmofunc
,
590 jiffies
+ msecs_to_jiffies(1000 * timeout
));
591 /* Set up heart beat (HB) timer */
592 mod_timer(&phba
->hb_tmofunc
,
593 jiffies
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL
));
594 phba
->hb_outstanding
= 0;
595 phba
->last_completion_time
= jiffies
;
596 /* Set up error attention (ERATT) polling timer */
597 mod_timer(&phba
->eratt_poll
,
598 jiffies
+ msecs_to_jiffies(1000 * phba
->eratt_poll_interval
));
600 if (phba
->hba_flag
& LINK_DISABLED
) {
601 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
602 "2598 Adapter Link is disabled.\n");
603 lpfc_down_link(phba
, pmb
);
604 pmb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
605 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
606 if ((rc
!= MBX_SUCCESS
) && (rc
!= MBX_BUSY
)) {
607 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
608 "2599 Adapter failed to issue DOWN_LINK"
609 " mbox command rc 0x%x\n", rc
);
611 mempool_free(pmb
, phba
->mbox_mem_pool
);
614 } else if (phba
->cfg_suppress_link_up
== LPFC_INITIALIZE_LINK
) {
615 mempool_free(pmb
, phba
->mbox_mem_pool
);
616 rc
= phba
->lpfc_hba_init_link(phba
, MBX_NOWAIT
);
620 /* MBOX buffer will be freed in mbox compl */
621 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
623 phba
->link_state
= LPFC_HBA_ERROR
;
627 lpfc_config_async(phba
, pmb
, LPFC_ELS_RING
);
628 pmb
->mbox_cmpl
= lpfc_config_async_cmpl
;
629 pmb
->vport
= phba
->pport
;
630 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
632 if ((rc
!= MBX_BUSY
) && (rc
!= MBX_SUCCESS
)) {
633 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
634 "0456 Adapter failed to issue "
635 "ASYNCEVT_ENABLE mbox status x%x\n",
637 mempool_free(pmb
, phba
->mbox_mem_pool
);
640 /* Get Option rom version */
641 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
643 phba
->link_state
= LPFC_HBA_ERROR
;
647 lpfc_dump_wakeup_param(phba
, pmb
);
648 pmb
->mbox_cmpl
= lpfc_dump_wakeup_param_cmpl
;
649 pmb
->vport
= phba
->pport
;
650 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
652 if ((rc
!= MBX_BUSY
) && (rc
!= MBX_SUCCESS
)) {
653 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
654 "0435 Adapter failed "
655 "to get Option ROM version status x%x\n", rc
);
656 mempool_free(pmb
, phba
->mbox_mem_pool
);
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
698 lpfc_hba_init_link_fc_topology(struct lpfc_hba
*phba
, uint32_t fc_topology
,
701 struct lpfc_vport
*vport
= phba
->pport
;
706 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
708 phba
->link_state
= LPFC_HBA_ERROR
;
714 if ((phba
->cfg_link_speed
> LPFC_USER_LINK_SPEED_MAX
) ||
715 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_1G
) &&
716 !(phba
->lmt
& LMT_1Gb
)) ||
717 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_2G
) &&
718 !(phba
->lmt
& LMT_2Gb
)) ||
719 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_4G
) &&
720 !(phba
->lmt
& LMT_4Gb
)) ||
721 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_8G
) &&
722 !(phba
->lmt
& LMT_8Gb
)) ||
723 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_10G
) &&
724 !(phba
->lmt
& LMT_10Gb
)) ||
725 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_16G
) &&
726 !(phba
->lmt
& LMT_16Gb
)) ||
727 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_32G
) &&
728 !(phba
->lmt
& LMT_32Gb
)) ||
729 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_64G
) &&
730 !(phba
->lmt
& LMT_64Gb
))) {
731 /* Reset link speed to auto */
732 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
733 "1302 Invalid speed for this board:%d "
734 "Reset link speed to auto.\n",
735 phba
->cfg_link_speed
);
736 phba
->cfg_link_speed
= LPFC_USER_LINK_SPEED_AUTO
;
738 lpfc_init_link(phba
, pmb
, fc_topology
, phba
->cfg_link_speed
);
739 pmb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
740 if (phba
->sli_rev
< LPFC_SLI_REV4
)
741 lpfc_set_loopback_flag(phba
);
742 rc
= lpfc_sli_issue_mbox(phba
, pmb
, flag
);
743 if ((rc
!= MBX_BUSY
) && (rc
!= MBX_SUCCESS
)) {
744 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
745 "0498 Adapter failed to init, mbxCmd x%x "
746 "INIT_LINK, mbxStatus x%x\n",
747 mb
->mbxCommand
, mb
->mbxStatus
);
748 if (phba
->sli_rev
<= LPFC_SLI_REV3
) {
749 /* Clear all interrupt enable conditions */
750 writel(0, phba
->HCregaddr
);
751 readl(phba
->HCregaddr
); /* flush */
752 /* Clear all pending interrupts */
753 writel(0xffffffff, phba
->HAregaddr
);
754 readl(phba
->HAregaddr
); /* flush */
756 phba
->link_state
= LPFC_HBA_ERROR
;
757 if (rc
!= MBX_BUSY
|| flag
== MBX_POLL
)
758 mempool_free(pmb
, phba
->mbox_mem_pool
);
761 phba
->cfg_suppress_link_up
= LPFC_INITIALIZE_LINK
;
762 if (flag
== MBX_POLL
)
763 mempool_free(pmb
, phba
->mbox_mem_pool
);
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 **/
862 lpfc_sli4_free_sp_events(struct lpfc_hba
*phba
)
864 struct lpfc_iocbq
*rspiocbq
;
865 struct hbq_dmabuf
*dmabuf
;
866 struct lpfc_cq_event
*cq_event
;
868 spin_lock_irq(&phba
->hbalock
);
869 phba
->hba_flag
&= ~HBA_SP_QUEUE_EVT
;
870 spin_unlock_irq(&phba
->hbalock
);
872 while (!list_empty(&phba
->sli4_hba
.sp_queue_event
)) {
873 /* Get the response iocb from the head of work queue */
874 spin_lock_irq(&phba
->hbalock
);
875 list_remove_head(&phba
->sli4_hba
.sp_queue_event
,
876 cq_event
, struct lpfc_cq_event
, list
);
877 spin_unlock_irq(&phba
->hbalock
);
879 switch (bf_get(lpfc_wcqe_c_code
, &cq_event
->cqe
.wcqe_cmpl
)) {
880 case CQE_CODE_COMPL_WQE
:
881 rspiocbq
= container_of(cq_event
, struct lpfc_iocbq
,
883 lpfc_sli_release_iocbq(phba
, rspiocbq
);
885 case CQE_CODE_RECEIVE
:
886 case CQE_CODE_RECEIVE_V1
:
887 dmabuf
= container_of(cq_event
, struct hbq_dmabuf
,
889 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 **/
906 lpfc_hba_free_post_buf(struct lpfc_hba
*phba
)
908 struct lpfc_sli
*psli
= &phba
->sli
;
909 struct lpfc_sli_ring
*pring
;
910 struct lpfc_dmabuf
*mp
, *next_mp
;
914 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
)
915 lpfc_sli_hbqbuf_free_all(phba
);
917 /* Cleanup preposted buffers on the ELS ring */
918 pring
= &psli
->sli3_ring
[LPFC_ELS_RING
];
919 spin_lock_irq(&phba
->hbalock
);
920 list_splice_init(&pring
->postbufq
, &buflist
);
921 spin_unlock_irq(&phba
->hbalock
);
924 list_for_each_entry_safe(mp
, next_mp
, &buflist
, list
) {
927 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
931 spin_lock_irq(&phba
->hbalock
);
932 pring
->postbufq_cnt
-= count
;
933 spin_unlock_irq(&phba
->hbalock
);
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 **/
948 lpfc_hba_clean_txcmplq(struct lpfc_hba
*phba
)
950 struct lpfc_sli
*psli
= &phba
->sli
;
951 struct lpfc_queue
*qp
= NULL
;
952 struct lpfc_sli_ring
*pring
;
953 LIST_HEAD(completions
);
955 struct lpfc_iocbq
*piocb
, *next_iocb
;
957 if (phba
->sli_rev
!= LPFC_SLI_REV4
) {
958 for (i
= 0; i
< psli
->num_rings
; i
++) {
959 pring
= &psli
->sli3_ring
[i
];
960 spin_lock_irq(&phba
->hbalock
);
961 /* At this point in time the HBA is either reset or DOA
962 * Nothing should be on txcmplq as it will
965 list_splice_init(&pring
->txcmplq
, &completions
);
966 pring
->txcmplq_cnt
= 0;
967 spin_unlock_irq(&phba
->hbalock
);
969 lpfc_sli_abort_iocb_ring(phba
, pring
);
971 /* Cancel all the IOCBs from the completions list */
972 lpfc_sli_cancel_iocbs(phba
, &completions
,
973 IOSTAT_LOCAL_REJECT
, IOERR_SLI_ABORTED
);
976 list_for_each_entry(qp
, &phba
->sli4_hba
.lpfc_wq_list
, wq_list
) {
980 spin_lock_irq(&pring
->ring_lock
);
981 list_for_each_entry_safe(piocb
, next_iocb
,
982 &pring
->txcmplq
, list
)
983 piocb
->iocb_flag
&= ~LPFC_IO_ON_TXCMPLQ
;
984 list_splice_init(&pring
->txcmplq
, &completions
);
985 pring
->txcmplq_cnt
= 0;
986 spin_unlock_irq(&pring
->ring_lock
);
987 lpfc_sli_abort_iocb_ring(phba
, pring
);
989 /* Cancel all the IOCBs from the completions list */
990 lpfc_sli_cancel_iocbs(phba
, &completions
,
991 IOSTAT_LOCAL_REJECT
, IOERR_SLI_ABORTED
);
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
1025 lpfc_hba_down_post_s4(struct lpfc_hba
*phba
)
1027 struct lpfc_io_buf
*psb
, *psb_next
;
1028 struct lpfc_async_xchg_ctx
*ctxp
, *ctxp_next
;
1029 struct lpfc_sli4_hdw_queue
*qp
;
1031 LIST_HEAD(nvme_aborts
);
1032 LIST_HEAD(nvmet_aborts
);
1033 struct lpfc_sglq
*sglq_entry
= NULL
;
1037 lpfc_sli_hbqbuf_free_all(phba
);
1038 lpfc_hba_clean_txcmplq(phba
);
1040 /* At this point in time the HBA is either reset or DOA. Either
1041 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
1042 * on the lpfc_els_sgl_list so that it can either be freed if the
1043 * driver is unloading or reposted if the driver is restarting
1046 spin_lock_irq(&phba
->hbalock
); /* required for lpfc_els_sgl_list and */
1048 /* sgl_list_lock required because worker thread uses this
1051 spin_lock(&phba
->sli4_hba
.sgl_list_lock
);
1052 list_for_each_entry(sglq_entry
,
1053 &phba
->sli4_hba
.lpfc_abts_els_sgl_list
, list
)
1054 sglq_entry
->state
= SGL_FREED
;
1056 list_splice_init(&phba
->sli4_hba
.lpfc_abts_els_sgl_list
,
1057 &phba
->sli4_hba
.lpfc_els_sgl_list
);
1060 spin_unlock(&phba
->sli4_hba
.sgl_list_lock
);
1062 /* abts_xxxx_buf_list_lock required because worker thread uses this
1066 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
1067 qp
= &phba
->sli4_hba
.hdwq
[idx
];
1069 spin_lock(&qp
->abts_io_buf_list_lock
);
1070 list_splice_init(&qp
->lpfc_abts_io_buf_list
,
1073 list_for_each_entry_safe(psb
, psb_next
, &aborts
, list
) {
1075 psb
->status
= IOSTAT_SUCCESS
;
1078 spin_lock(&qp
->io_buf_list_put_lock
);
1079 list_splice_init(&aborts
, &qp
->lpfc_io_buf_list_put
);
1080 qp
->put_io_bufs
+= qp
->abts_scsi_io_bufs
;
1081 qp
->put_io_bufs
+= qp
->abts_nvme_io_bufs
;
1082 qp
->abts_scsi_io_bufs
= 0;
1083 qp
->abts_nvme_io_bufs
= 0;
1084 spin_unlock(&qp
->io_buf_list_put_lock
);
1085 spin_unlock(&qp
->abts_io_buf_list_lock
);
1087 spin_unlock_irq(&phba
->hbalock
);
1089 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
1090 spin_lock_irq(&phba
->sli4_hba
.abts_nvmet_buf_list_lock
);
1091 list_splice_init(&phba
->sli4_hba
.lpfc_abts_nvmet_ctx_list
,
1093 spin_unlock_irq(&phba
->sli4_hba
.abts_nvmet_buf_list_lock
);
1094 list_for_each_entry_safe(ctxp
, ctxp_next
, &nvmet_aborts
, list
) {
1095 ctxp
->flag
&= ~(LPFC_NVME_XBUSY
| LPFC_NVME_ABORT_OP
);
1096 lpfc_nvmet_ctxbuf_post(phba
, ctxp
->ctxbuf
);
1100 lpfc_sli4_free_sp_events(phba
);
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 **/
1230 lpfc_idle_stat_delay_work(struct work_struct
*work
)
1232 struct lpfc_hba
*phba
= container_of(to_delayed_work(work
),
1234 idle_stat_delay_work
);
1235 struct lpfc_queue
*cq
;
1236 struct lpfc_sli4_hdw_queue
*hdwq
;
1237 struct lpfc_idle_stat
*idle_stat
;
1238 u32 i
, idle_percent
;
1239 u64 wall
, wall_idle
, diff_wall
, diff_idle
, busy_time
;
1241 if (phba
->pport
->load_flag
& FC_UNLOADING
)
1244 if (phba
->link_state
== LPFC_HBA_ERROR
||
1245 phba
->pport
->fc_flag
& FC_OFFLINE_MODE
)
1248 for_each_present_cpu(i
) {
1249 hdwq
= &phba
->sli4_hba
.hdwq
[phba
->sli4_hba
.cpu_map
[i
].hdwq
];
1252 /* Skip if we've already handled this cq's primary CPU */
1256 idle_stat
= &phba
->sli4_hba
.idle_stat
[i
];
1258 /* get_cpu_idle_time returns values as running counters. Thus,
1259 * to know the amount for this period, the prior counter values
1260 * need to be subtracted from the current counter values.
1261 * From there, the idle time stat can be calculated as a
1262 * percentage of 100 - the sum of the other consumption times.
1264 wall_idle
= get_cpu_idle_time(i
, &wall
, 1);
1265 diff_idle
= wall_idle
- idle_stat
->prev_idle
;
1266 diff_wall
= wall
- idle_stat
->prev_wall
;
1268 if (diff_wall
<= diff_idle
)
1271 busy_time
= diff_wall
- diff_idle
;
1273 idle_percent
= div64_u64(100 * busy_time
, diff_wall
);
1274 idle_percent
= 100 - idle_percent
;
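		/*
		 * idle_percent now holds the percentage of this sampling
		 * window the CPU spent idle; below 15% the CQ is handed to
		 * the worker thread, otherwise it stays in IRQ polling mode.
		 */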
1276 if (idle_percent
< 15)
1277 cq
->poll_mode
= LPFC_QUEUE_WORK
;
1279 cq
->poll_mode
= LPFC_IRQ_POLL
;
1281 idle_stat
->prev_idle
= wall_idle
;
1282 idle_stat
->prev_wall
= wall
;
1286 schedule_delayed_work(&phba
->idle_stat_delay_work
,
1287 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY
));
1291 lpfc_hb_eq_delay_work(struct work_struct
*work
)
1293 struct lpfc_hba
*phba
= container_of(to_delayed_work(work
),
1294 struct lpfc_hba
, eq_delay_work
);
1295 struct lpfc_eq_intr_info
*eqi
, *eqi_new
;
1296 struct lpfc_queue
*eq
, *eq_next
;
1297 unsigned char *ena_delay
= NULL
;
1301 if (!phba
->cfg_auto_imax
|| phba
->pport
->load_flag
& FC_UNLOADING
)
1304 if (phba
->link_state
== LPFC_HBA_ERROR
||
1305 phba
->pport
->fc_flag
& FC_OFFLINE_MODE
)
1308 ena_delay
= kcalloc(phba
->sli4_hba
.num_possible_cpu
, sizeof(*ena_delay
),
1313 for (i
= 0; i
< phba
->cfg_irq_chann
; i
++) {
1314 /* Get the EQ corresponding to the IRQ vector */
1315 eq
= phba
->sli4_hba
.hba_eq_hdl
[i
].eq
;
1318 if (eq
->q_mode
|| eq
->q_flag
& HBA_EQ_DELAY_CHK
) {
1319 eq
->q_flag
&= ~HBA_EQ_DELAY_CHK
;
1320 ena_delay
[eq
->last_cpu
] = 1;
1324 for_each_present_cpu(i
) {
1325 eqi
= per_cpu_ptr(phba
->sli4_hba
.eq_info
, i
);
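		/*
		 * Scale the EQ coalescing delay with the interrupt count seen
		 * on this CPU: roughly one LPFC_EQ_DELAY_STEP per 1024
		 * interrupts, capped at LPFC_MAX_AUTO_EQ_DELAY below.
		 */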
1327 usdelay
= (eqi
->icnt
>> 10) * LPFC_EQ_DELAY_STEP
;
1328 if (usdelay
> LPFC_MAX_AUTO_EQ_DELAY
)
1329 usdelay
= LPFC_MAX_AUTO_EQ_DELAY
;
1336 list_for_each_entry_safe(eq
, eq_next
, &eqi
->list
, cpu_list
) {
1337 if (unlikely(eq
->last_cpu
!= i
)) {
1338 eqi_new
= per_cpu_ptr(phba
->sli4_hba
.eq_info
,
1340 list_move_tail(&eq
->cpu_list
, &eqi_new
->list
);
1343 if (usdelay
!= eq
->q_mode
)
1344 lpfc_modify_hba_eq_delay(phba
, eq
->hdwq
, 1,
1352 queue_delayed_work(phba
->wq
, &phba
->eq_delay_work
,
1353 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS
));
/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine applies some heuristics to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * offline.
 **/
1400 lpfc_hb_timeout_handler(struct lpfc_hba
*phba
)
1402 struct lpfc_vport
**vports
;
1403 LPFC_MBOXQ_t
*pmboxq
;
1404 struct lpfc_dmabuf
*buf_ptr
;
1406 struct lpfc_sli
*psli
= &phba
->sli
;
1407 LIST_HEAD(completions
);
1409 if (phba
->cfg_xri_rebalancing
) {
1410 /* Multi-XRI pools handler */
1411 lpfc_hb_mxp_handler(phba
);
1414 vports
= lpfc_create_vport_work_array(phba
);
1416 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
1417 lpfc_rcv_seq_check_edtov(vports
[i
]);
1418 lpfc_fdmi_change_check(vports
[i
]);
1420 lpfc_destroy_vport_work_array(phba
, vports
);
1422 if ((phba
->link_state
== LPFC_HBA_ERROR
) ||
1423 (phba
->pport
->load_flag
& FC_UNLOADING
) ||
1424 (phba
->pport
->fc_flag
& FC_OFFLINE_MODE
))
1427 spin_lock_irq(&phba
->pport
->work_port_lock
);
1429 if (time_after(phba
->last_completion_time
+
1430 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL
),
1432 spin_unlock_irq(&phba
->pport
->work_port_lock
);
1433 if (!phba
->hb_outstanding
)
1434 mod_timer(&phba
->hb_tmofunc
,
1436 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL
));
1438 mod_timer(&phba
->hb_tmofunc
,
1440 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT
));
1443 spin_unlock_irq(&phba
->pport
->work_port_lock
);
1445 if (phba
->elsbuf_cnt
&&
1446 (phba
->elsbuf_cnt
== phba
->elsbuf_prev_cnt
)) {
1447 spin_lock_irq(&phba
->hbalock
);
1448 list_splice_init(&phba
->elsbuf
, &completions
);
1449 phba
->elsbuf_cnt
= 0;
1450 phba
->elsbuf_prev_cnt
= 0;
1451 spin_unlock_irq(&phba
->hbalock
);
1453 while (!list_empty(&completions
)) {
1454 list_remove_head(&completions
, buf_ptr
,
1455 struct lpfc_dmabuf
, list
);
1456 lpfc_mbuf_free(phba
, buf_ptr
->virt
, buf_ptr
->phys
);
1460 phba
->elsbuf_prev_cnt
= phba
->elsbuf_cnt
;
1462 /* If there is no heart beat outstanding, issue a heartbeat command */
1463 if (phba
->cfg_enable_hba_heartbeat
) {
1464 if (!phba
->hb_outstanding
) {
1465 if ((!(psli
->sli_flag
& LPFC_SLI_MBOX_ACTIVE
)) &&
1466 (list_empty(&psli
->mboxq
))) {
1467 pmboxq
= mempool_alloc(phba
->mbox_mem_pool
,
1470 mod_timer(&phba
->hb_tmofunc
,
1472 msecs_to_jiffies(1000 *
1473 LPFC_HB_MBOX_INTERVAL
));
1477 lpfc_heart_beat(phba
, pmboxq
);
1478 pmboxq
->mbox_cmpl
= lpfc_hb_mbox_cmpl
;
1479 pmboxq
->vport
= phba
->pport
;
1480 retval
= lpfc_sli_issue_mbox(phba
, pmboxq
,
1483 if (retval
!= MBX_BUSY
&&
1484 retval
!= MBX_SUCCESS
) {
1485 mempool_free(pmboxq
,
1486 phba
->mbox_mem_pool
);
1487 mod_timer(&phba
->hb_tmofunc
,
1489 msecs_to_jiffies(1000 *
1490 LPFC_HB_MBOX_INTERVAL
));
1493 phba
->skipped_hb
= 0;
1494 phba
->hb_outstanding
= 1;
1495 } else if (time_before_eq(phba
->last_completion_time
,
1496 phba
->skipped_hb
)) {
1497 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
1498 "2857 Last completion time not "
1499 " updated in %d ms\n",
1500 jiffies_to_msecs(jiffies
1501 - phba
->last_completion_time
));
1503 phba
->skipped_hb
= jiffies
;
1505 mod_timer(&phba
->hb_tmofunc
,
1507 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT
));
1511 * If heart beat timeout called with hb_outstanding set
1512 * we need to give the hb mailbox cmd a chance to
1515 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
1516 "0459 Adapter heartbeat still out"
1517 "standing:last compl time was %d ms.\n",
1518 jiffies_to_msecs(jiffies
1519 - phba
->last_completion_time
));
1520 mod_timer(&phba
->hb_tmofunc
,
1522 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT
));
1525 mod_timer(&phba
->hb_tmofunc
,
1527 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL
));
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
1591 lpfc_handle_deferred_eratt(struct lpfc_hba
*phba
)
1593 uint32_t old_host_status
= phba
->work_hs
;
1594 struct lpfc_sli
*psli
= &phba
->sli
;
1596 /* If the pci channel is offline, ignore possible errors,
1597 * since we cannot communicate with the pci card anyway.
1599 if (pci_channel_offline(phba
->pcidev
)) {
1600 spin_lock_irq(&phba
->hbalock
);
1601 phba
->hba_flag
&= ~DEFER_ERATT
;
1602 spin_unlock_irq(&phba
->hbalock
);
1606 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1607 "0479 Deferred Adapter Hardware Error "
1608 "Data: x%x x%x x%x\n",
1609 phba
->work_hs
, phba
->work_status
[0],
1610 phba
->work_status
[1]);
1612 spin_lock_irq(&phba
->hbalock
);
1613 psli
->sli_flag
&= ~LPFC_SLI_ACTIVE
;
1614 spin_unlock_irq(&phba
->hbalock
);
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
1622 lpfc_sli_abort_fcp_rings(phba
);
1625 * There was a firmware error. Take the hba offline and then
1626 * attempt to restart it.
1628 lpfc_offline_prep(phba
, LPFC_MBX_WAIT
);
1631 /* Wait for the ER1 bit to clear.*/
1632 while (phba
->work_hs
& HS_FFER1
) {
1634 if (lpfc_readl(phba
->HSregaddr
, &phba
->work_hs
)) {
1635 phba
->work_hs
= UNPLUG_ERR
;
1638 /* If driver is unloading let the worker thread continue */
1639 if (phba
->pport
->load_flag
& FC_UNLOADING
) {
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
1650 if ((!phba
->work_hs
) && (!(phba
->pport
->load_flag
& FC_UNLOADING
)))
1651 phba
->work_hs
= old_host_status
& ~HS_FFER1
;
1653 spin_lock_irq(&phba
->hbalock
);
1654 phba
->hba_flag
&= ~DEFER_ERATT
;
1655 spin_unlock_irq(&phba
->hbalock
);
1656 phba
->work_status
[0] = readl(phba
->MBslimaddr
+ 0xa8);
1657 phba
->work_status
[1] = readl(phba
->MBslimaddr
+ 0xac);
void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
1686 lpfc_handle_eratt_s3(struct lpfc_hba
*phba
)
1688 struct lpfc_vport
*vport
= phba
->pport
;
1689 struct lpfc_sli
*psli
= &phba
->sli
;
1690 uint32_t event_data
;
1691 unsigned long temperature
;
1692 struct temp_event temp_event_data
;
1693 struct Scsi_Host
*shost
;
1695 /* If the pci channel is offline, ignore possible errors,
1696 * since we cannot communicate with the pci card anyway.
1698 if (pci_channel_offline(phba
->pcidev
)) {
1699 spin_lock_irq(&phba
->hbalock
);
1700 phba
->hba_flag
&= ~DEFER_ERATT
;
1701 spin_unlock_irq(&phba
->hbalock
);
1705 /* If resets are disabled then leave the HBA alone and return */
1706 if (!phba
->cfg_enable_hba_reset
)
1709 /* Send an internal error event to mgmt application */
1710 lpfc_board_errevt_to_mgmt(phba
);
1712 if (phba
->hba_flag
& DEFER_ERATT
)
1713 lpfc_handle_deferred_eratt(phba
);
1715 if ((phba
->work_hs
& HS_FFER6
) || (phba
->work_hs
& HS_FFER8
)) {
1716 if (phba
->work_hs
& HS_FFER6
)
1717 /* Re-establishing Link */
1718 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
1719 "1301 Re-establishing Link "
1720 "Data: x%x x%x x%x\n",
1721 phba
->work_hs
, phba
->work_status
[0],
1722 phba
->work_status
[1]);
1723 if (phba
->work_hs
& HS_FFER8
)
1724 /* Device Zeroization */
1725 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
1726 "2861 Host Authentication device "
1727 "zeroization Data:x%x x%x x%x\n",
1728 phba
->work_hs
, phba
->work_status
[0],
1729 phba
->work_status
[1]);
1731 spin_lock_irq(&phba
->hbalock
);
1732 psli
->sli_flag
&= ~LPFC_SLI_ACTIVE
;
1733 spin_unlock_irq(&phba
->hbalock
);
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
1741 lpfc_sli_abort_fcp_rings(phba
);
1744 * There was a firmware error. Take the hba offline and then
1745 * attempt to restart it.
1747 lpfc_offline_prep(phba
, LPFC_MBX_NO_WAIT
);
1749 lpfc_sli_brdrestart(phba
);
1750 if (lpfc_online(phba
) == 0) { /* Initialize the HBA */
1751 lpfc_unblock_mgmt_io(phba
);
1754 lpfc_unblock_mgmt_io(phba
);
1755 } else if (phba
->work_hs
& HS_CRIT_TEMP
) {
1756 temperature
= readl(phba
->MBslimaddr
+ TEMPERATURE_OFFSET
);
1757 temp_event_data
.event_type
= FC_REG_TEMPERATURE_EVENT
;
1758 temp_event_data
.event_code
= LPFC_CRIT_TEMP
;
1759 temp_event_data
.data
= (uint32_t)temperature
;
1761 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1762 "0406 Adapter maximum temperature exceeded "
1763 "(%ld), taking this port offline "
1764 "Data: x%x x%x x%x\n",
1765 temperature
, phba
->work_hs
,
1766 phba
->work_status
[0], phba
->work_status
[1]);
1768 shost
= lpfc_shost_from_vport(phba
->pport
);
1769 fc_host_post_vendor_event(shost
, fc_get_event_number(),
1770 sizeof(temp_event_data
),
1771 (char *) &temp_event_data
,
1772 SCSI_NL_VID_TYPE_PCI
1773 | PCI_VENDOR_ID_EMULEX
);
1775 spin_lock_irq(&phba
->hbalock
);
1776 phba
->over_temp_state
= HBA_OVER_TEMP
;
1777 spin_unlock_irq(&phba
->hbalock
);
1778 lpfc_offline_eratt(phba
);
1781 /* The if clause above forces this code path when the status
1782 * failure is a value other than FFER6. Do not call the offline
1783 * twice. This is the adapter hardware error path.
1785 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1786 "0457 Adapter Hardware Error "
1787 "Data: x%x x%x x%x\n",
1789 phba
->work_status
[0], phba
->work_status
[1]);
1791 event_data
= FC_REG_DUMP_EVENT
;
1792 shost
= lpfc_shost_from_vport(vport
);
1793 fc_host_post_vendor_event(shost
, fc_get_event_number(),
1794 sizeof(event_data
), (char *) &event_data
,
1795 SCSI_NL_VID_TYPE_PCI
| PCI_VENDOR_ID_EMULEX
);
1797 lpfc_offline_eratt(phba
);
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
1814 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba
*phba
, int mbx_action
,
1820 if (bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
) >=
1821 LPFC_SLI_INTF_IF_TYPE_2
) {
1823 * On error status condition, driver need to wait for port
1824 * ready before performing reset.
1826 rc
= lpfc_sli4_pdev_status_reg_wait(phba
);
1831 /* need reset: attempt for port recovery */
1833 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1834 "2887 Reset Needed: Attempting Port "
1836 lpfc_offline_prep(phba
, mbx_action
);
1837 lpfc_sli_flush_io_rings(phba
);
1839 /* release interrupt for possible resource change */
1840 lpfc_sli4_disable_intr(phba
);
1841 rc
= lpfc_sli_brdrestart(phba
);
1843 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1844 "6309 Failed to restart board\n");
1847 /* request and enable interrupt */
1848 intr_mode
= lpfc_sli4_enable_intr(phba
, phba
->intr_mode
);
1849 if (intr_mode
== LPFC_INTR_ERROR
) {
1850 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1851 "3175 Failed to enable interrupt\n");
1854 phba
->intr_mode
= intr_mode
;
1855 rc
= lpfc_online(phba
);
1857 lpfc_unblock_mgmt_io(phba
);
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
1870 lpfc_handle_eratt_s4(struct lpfc_hba
*phba
)
1872 struct lpfc_vport
*vport
= phba
->pport
;
1873 uint32_t event_data
;
1874 struct Scsi_Host
*shost
;
1876 struct lpfc_register portstat_reg
= {0};
1877 uint32_t reg_err1
, reg_err2
;
1878 uint32_t uerrlo_reg
, uemasklo_reg
;
1879 uint32_t smphr_port_status
= 0, pci_rd_rc1
, pci_rd_rc2
;
1880 bool en_rn_msg
= true;
1881 struct temp_event temp_event_data
;
1882 struct lpfc_register portsmphr_reg
;
1885 /* If the pci channel is offline, ignore possible errors, since
1886 * we cannot communicate with the pci card anyway.
1888 if (pci_channel_offline(phba
->pcidev
)) {
1889 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1890 "3166 pci channel is offline\n");
1891 lpfc_sli4_offline_eratt(phba
);
1895 memset(&portsmphr_reg
, 0, sizeof(portsmphr_reg
));
1896 if_type
= bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
);
1898 case LPFC_SLI_INTF_IF_TYPE_0
:
1899 pci_rd_rc1
= lpfc_readl(
1900 phba
->sli4_hba
.u
.if_type0
.UERRLOregaddr
,
1902 pci_rd_rc2
= lpfc_readl(
1903 phba
->sli4_hba
.u
.if_type0
.UEMASKLOregaddr
,
1905 /* consider PCI bus read error as pci_channel_offline */
1906 if (pci_rd_rc1
== -EIO
&& pci_rd_rc2
== -EIO
)
1908 if (!(phba
->hba_flag
& HBA_RECOVERABLE_UE
)) {
1909 lpfc_sli4_offline_eratt(phba
);
1912 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1913 "7623 Checking UE recoverable");
1915 for (i
= 0; i
< phba
->sli4_hba
.ue_to_sr
/ 1000; i
++) {
1916 if (lpfc_readl(phba
->sli4_hba
.PSMPHRregaddr
,
1917 &portsmphr_reg
.word0
))
1920 smphr_port_status
= bf_get(lpfc_port_smphr_port_status
,
1922 if ((smphr_port_status
& LPFC_PORT_SEM_MASK
) ==
1923 LPFC_PORT_SEM_UE_RECOVERABLE
)
			/* Sleep for 1 sec before checking the SEMAPHORE */
1929 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1930 "4827 smphr_port_status x%x : Waited %dSec",
1931 smphr_port_status
, i
);
1933 /* Recoverable UE, reset the HBA device */
1934 if ((smphr_port_status
& LPFC_PORT_SEM_MASK
) ==
1935 LPFC_PORT_SEM_UE_RECOVERABLE
) {
1936 for (i
= 0; i
< 20; i
++) {
1938 if (!lpfc_readl(phba
->sli4_hba
.PSMPHRregaddr
,
1939 &portsmphr_reg
.word0
) &&
1940 (LPFC_POST_STAGE_PORT_READY
==
1941 bf_get(lpfc_port_smphr_port_status
,
1943 rc
= lpfc_sli4_port_sta_fn_reset(phba
,
1944 LPFC_MBX_NO_WAIT
, en_rn_msg
);
1947 lpfc_printf_log(phba
, KERN_ERR
,
1949 "4215 Failed to recover UE");
1954 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1955 "7624 Firmware not ready: Failing UE recovery,"
1956 " waited %dSec", i
);
1957 phba
->link_state
= LPFC_HBA_ERROR
;
1960 case LPFC_SLI_INTF_IF_TYPE_2
:
1961 case LPFC_SLI_INTF_IF_TYPE_6
:
1962 pci_rd_rc1
= lpfc_readl(
1963 phba
->sli4_hba
.u
.if_type2
.STATUSregaddr
,
1964 &portstat_reg
.word0
);
1965 /* consider PCI bus read error as pci_channel_offline */
1966 if (pci_rd_rc1
== -EIO
) {
1967 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1968 "3151 PCI bus read access failure: x%x\n",
1969 readl(phba
->sli4_hba
.u
.if_type2
.STATUSregaddr
));
1970 lpfc_sli4_offline_eratt(phba
);
1973 reg_err1
= readl(phba
->sli4_hba
.u
.if_type2
.ERR1regaddr
);
1974 reg_err2
= readl(phba
->sli4_hba
.u
.if_type2
.ERR2regaddr
);
1975 if (bf_get(lpfc_sliport_status_oti
, &portstat_reg
)) {
1976 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1977 "2889 Port Overtemperature event, "
1978 "taking port offline Data: x%x x%x\n",
1979 reg_err1
, reg_err2
);
1981 phba
->sfp_alarm
|= LPFC_TRANSGRESSION_HIGH_TEMPERATURE
;
1982 temp_event_data
.event_type
= FC_REG_TEMPERATURE_EVENT
;
1983 temp_event_data
.event_code
= LPFC_CRIT_TEMP
;
1984 temp_event_data
.data
= 0xFFFFFFFF;
1986 shost
= lpfc_shost_from_vport(phba
->pport
);
1987 fc_host_post_vendor_event(shost
, fc_get_event_number(),
1988 sizeof(temp_event_data
),
1989 (char *)&temp_event_data
,
1990 SCSI_NL_VID_TYPE_PCI
1991 | PCI_VENDOR_ID_EMULEX
);
1993 spin_lock_irq(&phba
->hbalock
);
1994 phba
->over_temp_state
= HBA_OVER_TEMP
;
1995 spin_unlock_irq(&phba
->hbalock
);
1996 lpfc_sli4_offline_eratt(phba
);
1999 if (reg_err1
== SLIPORT_ERR1_REG_ERR_CODE_2
&&
2000 reg_err2
== SLIPORT_ERR2_REG_FW_RESTART
) {
2001 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
2002 "3143 Port Down: Firmware Update "
2005 } else if (reg_err1
== SLIPORT_ERR1_REG_ERR_CODE_2
&&
2006 reg_err2
== SLIPORT_ERR2_REG_FORCED_DUMP
)
2007 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
2008 "3144 Port Down: Debug Dump\n");
2009 else if (reg_err1
== SLIPORT_ERR1_REG_ERR_CODE_2
&&
2010 reg_err2
== SLIPORT_ERR2_REG_FUNC_PROVISON
)
2011 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
2012 "3145 Port Down: Provisioning\n");
2014 /* If resets are disabled then leave the HBA alone and return */
2015 if (!phba
->cfg_enable_hba_reset
)
2018 /* Check port status register for function reset */
2019 rc
= lpfc_sli4_port_sta_fn_reset(phba
, LPFC_MBX_NO_WAIT
,
2022 /* don't report event on forced debug dump */
2023 if (reg_err1
== SLIPORT_ERR1_REG_ERR_CODE_2
&&
2024 reg_err2
== SLIPORT_ERR2_REG_FORCED_DUMP
)
2029 /* fall through for not able to recover */
2030 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
2031 "3152 Unrecoverable error\n");
2032 phba
->link_state
= LPFC_HBA_ERROR
;
2034 case LPFC_SLI_INTF_IF_TYPE_1
:
2038 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
2039 "3123 Report dump event to upper layer\n");
2040 /* Send an internal error event to mgmt application */
2041 lpfc_board_errevt_to_mgmt(phba
);
2043 event_data
= FC_REG_DUMP_EVENT
;
2044 shost
= lpfc_shost_from_vport(vport
);
2045 fc_host_post_vendor_event(shost
, fc_get_event_number(),
2046 sizeof(event_data
), (char *) &event_data
,
2047 SCSI_NL_VID_TYPE_PCI
| PCI_VENDOR_ID_EMULEX
);
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine, invoked through the API jump table function pointer in the
 * lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. SLI3 only.
 **/
2075 lpfc_handle_latt(struct lpfc_hba
*phba
)
2077 struct lpfc_vport
*vport
= phba
->pport
;
2078 struct lpfc_sli
*psli
= &phba
->sli
;
2080 volatile uint32_t control
;
2081 struct lpfc_dmabuf
*mp
;
2084 pmb
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2087 goto lpfc_handle_latt_err_exit
;
2090 mp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
2093 goto lpfc_handle_latt_free_pmb
;
2096 mp
->virt
= lpfc_mbuf_alloc(phba
, 0, &mp
->phys
);
2099 goto lpfc_handle_latt_free_mp
;
2102 /* Cleanup any outstanding ELS commands */
2103 lpfc_els_flush_all_cmd(phba
);
2105 psli
->slistat
.link_event
++;
2106 lpfc_read_topology(phba
, pmb
, mp
);
2107 pmb
->mbox_cmpl
= lpfc_mbx_cmpl_read_topology
;
2109 /* Block ELS IOCBs until we have processed this mbox command */
2110 phba
->sli
.sli3_ring
[LPFC_ELS_RING
].flag
|= LPFC_STOP_IOCB_EVENT
;
2111 rc
= lpfc_sli_issue_mbox (phba
, pmb
, MBX_NOWAIT
);
2112 if (rc
== MBX_NOT_FINISHED
) {
2114 goto lpfc_handle_latt_free_mbuf
;
2117 /* Clear Link Attention in HA REG */
2118 spin_lock_irq(&phba
->hbalock
);
2119 writel(HA_LATT
, phba
->HAregaddr
);
2120 readl(phba
->HAregaddr
); /* flush */
2121 spin_unlock_irq(&phba
->hbalock
);
2125 lpfc_handle_latt_free_mbuf
:
2126 phba
->sli
.sli3_ring
[LPFC_ELS_RING
].flag
&= ~LPFC_STOP_IOCB_EVENT
;
2127 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2128 lpfc_handle_latt_free_mp
:
2130 lpfc_handle_latt_free_pmb
:
2131 mempool_free(pmb
, phba
->mbox_mem_pool
);
2132 lpfc_handle_latt_err_exit
:
2133 /* Enable Link attention interrupts */
2134 spin_lock_irq(&phba
->hbalock
);
2135 psli
->sli_flag
|= LPFC_PROCESS_LA
;
2136 control
= readl(phba
->HCregaddr
);
2137 control
|= HC_LAINT_ENA
;
2138 writel(control
, phba
->HCregaddr
);
2139 readl(phba
->HCregaddr
); /* flush */
2141 /* Clear Link Attention in HA REG */
2142 writel(HA_LATT
, phba
->HAregaddr
);
2143 readl(phba
->HAregaddr
); /* flush */
2144 spin_unlock_irq(&phba
->hbalock
);
2145 lpfc_linkdown(phba
);
2146 phba
->link_state
= LPFC_HBA_ERROR
;
2148 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
2149 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc
);
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
2169 lpfc_parse_vpd(struct lpfc_hba
*phba
, uint8_t *vpd
, int len
)
2171 uint8_t lenlo
, lenhi
;
2181 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
2182 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2183 (uint32_t) vpd
[0], (uint32_t) vpd
[1], (uint32_t) vpd
[2],
2185 while (!finished
&& (index
< (len
- 4))) {
2186 switch (vpd
[index
]) {
2194 i
= ((((unsigned short)lenhi
) << 8) + lenlo
);
2203 Length
= ((((unsigned short)lenhi
) << 8) + lenlo
);
2204 if (Length
> len
- index
)
2205 Length
= len
- index
;
2206 while (Length
> 0) {
2207 /* Look for Serial Number */
2208 if ((vpd
[index
] == 'S') && (vpd
[index
+1] == 'N')) {
2215 phba
->SerialNumber
[j
++] = vpd
[index
++];
2219 phba
->SerialNumber
[j
] = 0;
2222 else if ((vpd
[index
] == 'V') && (vpd
[index
+1] == '1')) {
2223 phba
->vpd_flag
|= VPD_MODEL_DESC
;
2230 phba
->ModelDesc
[j
++] = vpd
[index
++];
2234 phba
->ModelDesc
[j
] = 0;
2237 else if ((vpd
[index
] == 'V') && (vpd
[index
+1] == '2')) {
2238 phba
->vpd_flag
|= VPD_MODEL_NAME
;
2245 phba
->ModelName
[j
++] = vpd
[index
++];
2249 phba
->ModelName
[j
] = 0;
2252 else if ((vpd
[index
] == 'V') && (vpd
[index
+1] == '3')) {
2253 phba
->vpd_flag
|= VPD_PROGRAM_TYPE
;
2260 phba
->ProgramType
[j
++] = vpd
[index
++];
2264 phba
->ProgramType
[j
] = 0;
2267 else if ((vpd
[index
] == 'V') && (vpd
[index
+1] == '4')) {
2268 phba
->vpd_flag
|= VPD_PORT
;
2275 if ((phba
->sli_rev
== LPFC_SLI_REV4
) &&
2276 (phba
->sli4_hba
.pport_name_sta
==
2277 LPFC_SLI4_PPNAME_GET
)) {
2281 phba
->Port
[j
++] = vpd
[index
++];
2285 if ((phba
->sli_rev
!= LPFC_SLI_REV4
) ||
2286 (phba
->sli4_hba
.pport_name_sta
==
2287 LPFC_SLI4_PPNAME_NON
))
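/*
 * Illustrative userspace sketch (not driver code): decoding a VPD-style
 * descriptor length as the parser above does, where two length bytes follow
 * a tag and combine as (lenhi << 8) + lenlo, and the value is clamped to the
 * bytes actually remaining in the buffer.  The sample buffer below is made up.
 */
#include <stdio.h>

int main(void)
{
	/* tag 0x90 ("read-only data"), lenlo=0x04, lenhi=0x00, 4 data bytes */
	unsigned char vpd[] = { 0x90, 0x04, 0x00, 'S', 'N', '0', '1' };
	int len = (int)sizeof(vpd);
	int index = 0;
	unsigned char lenlo, lenhi;
	int Length;

	index++;			/* step past the tag byte */
	lenlo = vpd[index++];
	lenhi = vpd[index++];
	Length = ((((unsigned short)lenhi) << 8) + lenlo);
	if (Length > len - index)	/* never read past the buffer */
		Length = len - index;

	printf("descriptor holds %d byte(s): %.*s\n",
	       Length, Length, &vpd[index]);
	return 0;
}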
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
2326 lpfc_get_hba_model_desc(struct lpfc_hba
*phba
, uint8_t *mdp
, uint8_t *descp
)
2329 uint16_t dev_id
= phba
->pcidev
->device
;
2332 int oneConnect
= 0; /* default is not a oneConnect */
2337 } m
= {"<Unknown>", "", ""};
2339 if (mdp
&& mdp
[0] != '\0'
2340 && descp
&& descp
[0] != '\0')
2343 if (phba
->lmt
& LMT_64Gb
)
2345 else if (phba
->lmt
& LMT_32Gb
)
2347 else if (phba
->lmt
& LMT_16Gb
)
2349 else if (phba
->lmt
& LMT_10Gb
)
2351 else if (phba
->lmt
& LMT_8Gb
)
2353 else if (phba
->lmt
& LMT_4Gb
)
2355 else if (phba
->lmt
& LMT_2Gb
)
2357 else if (phba
->lmt
& LMT_1Gb
)
2365 case PCI_DEVICE_ID_FIREFLY
:
2366 m
= (typeof(m
)){"LP6000", "PCI",
2367 "Obsolete, Unsupported Fibre Channel Adapter"};
2369 case PCI_DEVICE_ID_SUPERFLY
:
2370 if (vp
->rev
.biuRev
>= 1 && vp
->rev
.biuRev
<= 3)
2371 m
= (typeof(m
)){"LP7000", "PCI", ""};
2373 m
= (typeof(m
)){"LP7000E", "PCI", ""};
2374 m
.function
= "Obsolete, Unsupported Fibre Channel Adapter";
2376 case PCI_DEVICE_ID_DRAGONFLY
:
2377 m
= (typeof(m
)){"LP8000", "PCI",
2378 "Obsolete, Unsupported Fibre Channel Adapter"};
2380 case PCI_DEVICE_ID_CENTAUR
:
2381 if (FC_JEDEC_ID(vp
->rev
.biuRev
) == CENTAUR_2G_JEDEC_ID
)
2382 m
= (typeof(m
)){"LP9002", "PCI", ""};
2384 m
= (typeof(m
)){"LP9000", "PCI", ""};
2385 m
.function
= "Obsolete, Unsupported Fibre Channel Adapter";
2387 case PCI_DEVICE_ID_RFLY
:
2388 m
= (typeof(m
)){"LP952", "PCI",
2389 "Obsolete, Unsupported Fibre Channel Adapter"};
2391 case PCI_DEVICE_ID_PEGASUS
:
2392 m
= (typeof(m
)){"LP9802", "PCI-X",
2393 "Obsolete, Unsupported Fibre Channel Adapter"};
2395 case PCI_DEVICE_ID_THOR
:
2396 m
= (typeof(m
)){"LP10000", "PCI-X",
2397 "Obsolete, Unsupported Fibre Channel Adapter"};
2399 case PCI_DEVICE_ID_VIPER
:
2400 m
= (typeof(m
)){"LPX1000", "PCI-X",
2401 "Obsolete, Unsupported Fibre Channel Adapter"};
2403 case PCI_DEVICE_ID_PFLY
:
2404 m
= (typeof(m
)){"LP982", "PCI-X",
2405 "Obsolete, Unsupported Fibre Channel Adapter"};
2407 case PCI_DEVICE_ID_TFLY
:
2408 m
= (typeof(m
)){"LP1050", "PCI-X",
2409 "Obsolete, Unsupported Fibre Channel Adapter"};
2411 case PCI_DEVICE_ID_HELIOS
:
2412 m
= (typeof(m
)){"LP11000", "PCI-X2",
2413 "Obsolete, Unsupported Fibre Channel Adapter"};
2415 case PCI_DEVICE_ID_HELIOS_SCSP
:
2416 m
= (typeof(m
)){"LP11000-SP", "PCI-X2",
2417 "Obsolete, Unsupported Fibre Channel Adapter"};
2419 case PCI_DEVICE_ID_HELIOS_DCSP
:
2420 m
= (typeof(m
)){"LP11002-SP", "PCI-X2",
2421 "Obsolete, Unsupported Fibre Channel Adapter"};
2423 case PCI_DEVICE_ID_NEPTUNE
:
2424 m
= (typeof(m
)){"LPe1000", "PCIe",
2425 "Obsolete, Unsupported Fibre Channel Adapter"};
2427 case PCI_DEVICE_ID_NEPTUNE_SCSP
:
2428 m
= (typeof(m
)){"LPe1000-SP", "PCIe",
2429 "Obsolete, Unsupported Fibre Channel Adapter"};
2431 case PCI_DEVICE_ID_NEPTUNE_DCSP
:
2432 m
= (typeof(m
)){"LPe1002-SP", "PCIe",
2433 "Obsolete, Unsupported Fibre Channel Adapter"};
2435 case PCI_DEVICE_ID_BMID
:
2436 m
= (typeof(m
)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2438 case PCI_DEVICE_ID_BSMB
:
2439 m
= (typeof(m
)){"LP111", "PCI-X2",
2440 "Obsolete, Unsupported Fibre Channel Adapter"};
2442 case PCI_DEVICE_ID_ZEPHYR
:
2443 m
= (typeof(m
)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2445 case PCI_DEVICE_ID_ZEPHYR_SCSP
:
2446 m
= (typeof(m
)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2448 case PCI_DEVICE_ID_ZEPHYR_DCSP
:
2449 m
= (typeof(m
)){"LP2105", "PCIe", "FCoE Adapter"};
2452 case PCI_DEVICE_ID_ZMID
:
2453 m
= (typeof(m
)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2455 case PCI_DEVICE_ID_ZSMB
:
2456 m
= (typeof(m
)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2458 case PCI_DEVICE_ID_LP101
:
2459 m
= (typeof(m
)){"LP101", "PCI-X",
2460 "Obsolete, Unsupported Fibre Channel Adapter"};
2462 case PCI_DEVICE_ID_LP10000S
:
2463 m
= (typeof(m
)){"LP10000-S", "PCI",
2464 "Obsolete, Unsupported Fibre Channel Adapter"};
2466 case PCI_DEVICE_ID_LP11000S
:
2467 m
= (typeof(m
)){"LP11000-S", "PCI-X2",
2468 "Obsolete, Unsupported Fibre Channel Adapter"};
2470 case PCI_DEVICE_ID_LPE11000S
:
2471 m
= (typeof(m
)){"LPe11000-S", "PCIe",
2472 "Obsolete, Unsupported Fibre Channel Adapter"};
2474 case PCI_DEVICE_ID_SAT
:
2475 m
= (typeof(m
)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2477 case PCI_DEVICE_ID_SAT_MID
:
2478 m
= (typeof(m
)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2480 case PCI_DEVICE_ID_SAT_SMB
:
2481 m
= (typeof(m
)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2483 case PCI_DEVICE_ID_SAT_DCSP
:
2484 m
= (typeof(m
)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2486 case PCI_DEVICE_ID_SAT_SCSP
:
2487 m
= (typeof(m
)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2489 case PCI_DEVICE_ID_SAT_S
:
2490 m
= (typeof(m
)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2492 case PCI_DEVICE_ID_HORNET
:
2493 m
= (typeof(m
)){"LP21000", "PCIe",
2494 "Obsolete, Unsupported FCoE Adapter"};
2497 case PCI_DEVICE_ID_PROTEUS_VF
:
2498 m
= (typeof(m
)){"LPev12000", "PCIe IOV",
2499 "Obsolete, Unsupported Fibre Channel Adapter"};
2501 case PCI_DEVICE_ID_PROTEUS_PF
:
2502 m
= (typeof(m
)){"LPev12000", "PCIe IOV",
2503 "Obsolete, Unsupported Fibre Channel Adapter"};
2505 case PCI_DEVICE_ID_PROTEUS_S
:
2506 m
= (typeof(m
)){"LPemv12002-S", "PCIe IOV",
2507 "Obsolete, Unsupported Fibre Channel Adapter"};
2509 case PCI_DEVICE_ID_TIGERSHARK
:
2511 m
= (typeof(m
)){"OCe10100", "PCIe", "FCoE"};
2513 case PCI_DEVICE_ID_TOMCAT
:
2515 m
= (typeof(m
)){"OCe11100", "PCIe", "FCoE"};
2517 case PCI_DEVICE_ID_FALCON
:
2518 m
= (typeof(m
)){"LPSe12002-ML1-E", "PCIe",
2519 "EmulexSecure Fibre"};
2521 case PCI_DEVICE_ID_BALIUS
:
2522 m
= (typeof(m
)){"LPVe12002", "PCIe Shared I/O",
2523 "Obsolete, Unsupported Fibre Channel Adapter"};
2525 case PCI_DEVICE_ID_LANCER_FC
:
2526 m
= (typeof(m
)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2528 case PCI_DEVICE_ID_LANCER_FC_VF
:
2529 m
= (typeof(m
)){"LPe16000", "PCIe",
2530 "Obsolete, Unsupported Fibre Channel Adapter"};
2532 case PCI_DEVICE_ID_LANCER_FCOE
:
2534 m
= (typeof(m
)){"OCe15100", "PCIe", "FCoE"};
2536 case PCI_DEVICE_ID_LANCER_FCOE_VF
:
2538 m
= (typeof(m
)){"OCe15100", "PCIe",
2539 "Obsolete, Unsupported FCoE"};
2541 case PCI_DEVICE_ID_LANCER_G6_FC
:
2542 m
= (typeof(m
)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2544 case PCI_DEVICE_ID_LANCER_G7_FC
:
2545 m
= (typeof(m
)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2547 case PCI_DEVICE_ID_SKYHAWK
:
2548 case PCI_DEVICE_ID_SKYHAWK_VF
:
2550 m
= (typeof(m
)){"OCe14000", "PCIe", "FCoE"};
2553 m
= (typeof(m
)){"Unknown", "", ""};
2557 if (mdp
&& mdp
[0] == '\0')
2558 snprintf(mdp
, 79,"%s", m
.name
);
	/*
	 * OneConnect HBAs require special processing: they are all
	 * initiators and we put the port number on the end.
	 */
2563 if (descp
&& descp
[0] == '\0') {
2565 snprintf(descp
, 255,
2566 "Emulex OneConnect %s, %s Initiator %s",
2569 else if (max_speed
== 0)
2570 snprintf(descp
, 255,
2572 m
.name
, m
.bus
, m
.function
);
2574 snprintf(descp
, 255,
2575 "Emulex %s %d%s %s %s",
2576 m
.name
, max_speed
, (GE
) ? "GE" : "Gb",
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
2594 lpfc_post_buffer(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
, int cnt
)
2597 struct lpfc_iocbq
*iocb
;
2598 struct lpfc_dmabuf
*mp1
, *mp2
;
2600 cnt
+= pring
->missbufcnt
;
2602 /* While there are buffers to post */
2604 /* Allocate buffer for command iocb */
2605 iocb
= lpfc_sli_get_iocbq(phba
);
2607 pring
->missbufcnt
= cnt
;
2612 /* 2 buffers can be posted per command */
2613 /* Allocate buffer to post */
2614 mp1
= kmalloc(sizeof (struct lpfc_dmabuf
), GFP_KERNEL
);
2616 mp1
->virt
= lpfc_mbuf_alloc(phba
, MEM_PRI
, &mp1
->phys
);
2617 if (!mp1
|| !mp1
->virt
) {
2619 lpfc_sli_release_iocbq(phba
, iocb
);
2620 pring
->missbufcnt
= cnt
;
2624 INIT_LIST_HEAD(&mp1
->list
);
2625 /* Allocate buffer to post */
2627 mp2
= kmalloc(sizeof (struct lpfc_dmabuf
), GFP_KERNEL
);
2629 mp2
->virt
= lpfc_mbuf_alloc(phba
, MEM_PRI
,
2631 if (!mp2
|| !mp2
->virt
) {
2633 lpfc_mbuf_free(phba
, mp1
->virt
, mp1
->phys
);
2635 lpfc_sli_release_iocbq(phba
, iocb
);
2636 pring
->missbufcnt
= cnt
;
2640 INIT_LIST_HEAD(&mp2
->list
);
2645 icmd
->un
.cont64
[0].addrHigh
= putPaddrHigh(mp1
->phys
);
2646 icmd
->un
.cont64
[0].addrLow
= putPaddrLow(mp1
->phys
);
2647 icmd
->un
.cont64
[0].tus
.f
.bdeSize
= FCELSSIZE
;
2648 icmd
->ulpBdeCount
= 1;
2651 icmd
->un
.cont64
[1].addrHigh
= putPaddrHigh(mp2
->phys
);
2652 icmd
->un
.cont64
[1].addrLow
= putPaddrLow(mp2
->phys
);
2653 icmd
->un
.cont64
[1].tus
.f
.bdeSize
= FCELSSIZE
;
2655 icmd
->ulpBdeCount
= 2;
2658 icmd
->ulpCommand
= CMD_QUE_RING_BUF64_CN
;
2661 if (lpfc_sli_issue_iocb(phba
, pring
->ringno
, iocb
, 0) ==
2663 lpfc_mbuf_free(phba
, mp1
->virt
, mp1
->phys
);
2667 lpfc_mbuf_free(phba
, mp2
->virt
, mp2
->phys
);
2671 lpfc_sli_release_iocbq(phba
, iocb
);
2672 pring
->missbufcnt
= cnt
;
2675 lpfc_sli_ringpostbuf_put(phba
, pring
, mp1
);
2677 lpfc_sli_ringpostbuf_put(phba
, pring
, mp2
);
2679 pring
->missbufcnt
= 0;
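/*
 * Illustrative userspace sketch (not driver code): the "remember the
 * shortfall and retry later" accounting the posting loop above uses -- if an
 * allocation fails partway through, stop, record how many buffers are still
 * owed (missbufcnt), and fold that back in on the next call.  try_alloc() is
 * a hypothetical stand-in for the buffer allocation.
 */
#include <stdio.h>
#include <stdlib.h>

static void *try_alloc(int i)
{
	return (i < 5) ? malloc(32) : NULL;	/* simulate running dry */
}

static int post_buffers(int cnt, int *missbufcnt)
{
	int i, posted = 0;

	cnt += *missbufcnt;			/* include what we still owe */
	for (i = 0; i < cnt; i++) {
		void *buf = try_alloc(i);

		if (!buf) {
			*missbufcnt = cnt - posted;	/* retry these later */
			return posted;
		}
		/* ... hand buf to the ring ... */
		free(buf);			/* demo only: release it */
		posted++;
	}
	*missbufcnt = 0;
	return posted;
}

int main(void)
{
	int miss = 0;
	int posted = post_buffers(8, &miss);

	printf("posted %d, still owed %d\n", posted, miss);
	return 0;
}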
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs. SLI3 only.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are put back into the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;

	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}
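/*
 * Illustrative userspace sketch (not driver code): the S(N,V) rotate-left
 * macro above is the standard SHA-1 circular shift.  This standalone demo
 * just exercises it on a test word so its behavior is easy to verify.
 */
#include <stdint.h>
#include <stdio.h>

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

int main(void)
{
	uint32_t v = 0x80000001u;

	/* rotate-left by 1: the top bit wraps around to bit 0 */
	printf("S(1, 0x%08x)  = 0x%08x\n", v, S(1, v));
	/* rotate-left by 5 and 30, the two amounts SHA-1 uses */
	printf("S(5, 0x%08x)  = 0x%08x\n", v, S(5, v));
	printf("S(30, 0x%08x) = 0x%08x\n", v, S(30, v));
	return 0;
}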
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
static void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}
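/*
 * Illustrative userspace sketch (not driver code): how the routine above
 * seeds the 80-word working array from the two WWNN words and folds the
 * random challenge words in with XOR (what lpfc_challenge_key does).
 * The wwnn and challenge values below are made-up test data.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t HashWorking[80];
	uint32_t wwnn[2] = { 0x20000090u, 0xfa012345u };	/* fake WWNN */
	uint32_t challenge[7] = { 1, 2, 3, 4, 5, 6, 7 };	/* fake data */
	int t;

	memset(HashWorking, 0, sizeof(HashWorking));
	HashWorking[0] = HashWorking[78] = wwnn[0];
	HashWorking[1] = HashWorking[79] = wwnn[1];

	/* XOR each challenge word into its working-array slot */
	for (t = 0; t < 7; t++)
		HashWorking[t] = challenge[t] ^ HashWorking[t];

	for (t = 0; t < 4; t++)
		printf("HashWorking[%d] = 0x%08x\n", t, HashWorking[t]);
	return 0;
}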
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
2837 lpfc_cleanup(struct lpfc_vport
*vport
)
2839 struct lpfc_hba
*phba
= vport
->phba
;
2840 struct lpfc_nodelist
*ndlp
, *next_ndlp
;
2843 if (phba
->link_state
> LPFC_LINK_DOWN
)
2844 lpfc_port_link_failure(vport
);
2846 list_for_each_entry_safe(ndlp
, next_ndlp
, &vport
->fc_nodes
, nlp_listp
) {
2847 if (vport
->port_type
!= LPFC_PHYSICAL_PORT
&&
2848 ndlp
->nlp_DID
== Fabric_DID
) {
2849 /* Just free up ndlp with Fabric_DID for vports */
2854 if (ndlp
->nlp_DID
== Fabric_Cntl_DID
&&
2855 ndlp
->nlp_state
== NLP_STE_UNUSED_NODE
) {
2860 /* Fabric Ports not in UNMAPPED state are cleaned up in the
2863 if (ndlp
->nlp_type
& NLP_FABRIC
&&
2864 ndlp
->nlp_state
== NLP_STE_UNMAPPED_NODE
)
2865 lpfc_disc_state_machine(vport
, ndlp
, NULL
,
2866 NLP_EVT_DEVICE_RECOVERY
);
2868 if (!(ndlp
->fc4_xpt_flags
& (NVME_XPT_REGD
|SCSI_XPT_REGD
)))
2869 lpfc_disc_state_machine(vport
, ndlp
, NULL
,
2873 /* At this point, ALL ndlp's should be gone
2874 * because of the previous NLP_EVT_DEVICE_RM.
2875 * Lets wait for this to happen, if needed.
2877 while (!list_empty(&vport
->fc_nodes
)) {
2879 lpfc_printf_vlog(vport
, KERN_ERR
,
2881 "0233 Nodelist not empty\n");
2882 list_for_each_entry_safe(ndlp
, next_ndlp
,
2883 &vport
->fc_nodes
, nlp_listp
) {
2884 lpfc_printf_vlog(ndlp
->vport
, KERN_ERR
,
2886 "0282 did:x%x ndlp:x%px "
2887 "refcnt:%d xflags x%x nflag x%x\n",
2888 ndlp
->nlp_DID
, (void *)ndlp
,
2889 kref_read(&ndlp
->kref
),
2890 ndlp
->fc4_xpt_flags
,
2896 /* Wait for any activity on ndlps to settle */
2899 lpfc_cleanup_vports_rrqs(vport
, NULL
);
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with an HBA. This function is
 * invoked before either putting an HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	if (phba->pport)
		lpfc_stop_vport_timers(phba->pport);
	cancel_delayed_work_sync(&phba->eq_delay_work);
	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox no wait action.
 *
 * This routine marks an HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to the
 * HBA, whether from the sysfs interface or the libdfc interface, will be
 * blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
3014 lpfc_block_mgmt_io(struct lpfc_hba
*phba
, int mbx_action
)
3016 unsigned long iflag
;
3017 uint8_t actcmd
= MBX_HEARTBEAT
;
3018 unsigned long timeout
;
3020 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3021 phba
->sli
.sli_flag
|= LPFC_BLOCK_MGMT_IO
;
3022 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3023 if (mbx_action
== LPFC_MBX_NO_WAIT
)
3025 timeout
= msecs_to_jiffies(LPFC_MBOX_TMO
* 1000) + jiffies
;
3026 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3027 if (phba
->sli
.mbox_active
) {
3028 actcmd
= phba
->sli
.mbox_active
->u
.mb
.mbxCommand
;
3029 /* Determine how long we might wait for the active mailbox
3030 * command to be gracefully completed by firmware.
3032 timeout
= msecs_to_jiffies(lpfc_mbox_tmo_val(phba
,
3033 phba
->sli
.mbox_active
) * 1000) + jiffies
;
3035 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
	/* Wait for the outstanding mailbox command to complete */
3038 while (phba
->sli
.mbox_active
) {
3039 /* Check active mailbox complete status every 2ms */
3041 if (time_after(jiffies
, timeout
)) {
3042 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
3043 "2813 Mgmt IO is Blocked %x "
3044 "- mbox cmd %x still active\n",
3045 phba
->sli
.sli_flag
, actcmd
);
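/*
 * Illustrative userspace sketch (not driver code): the deadline pattern the
 * routine above builds with msecs_to_jiffies()/time_after() -- compute an
 * absolute deadline once, then poll until the condition clears or the
 * deadline passes.  busy_condition() is a hypothetical stand-in.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool busy_condition(void)
{
	static int polls;

	return ++polls < 4;		/* pretend it clears on the 4th poll */
}

int main(void)
{
	time_t deadline = time(NULL) + 30;	/* 30 second budget */
	bool timed_out = false;

	while (busy_condition()) {
		struct timespec ts = { 0, 2 * 1000 * 1000 };	/* 2 ms */

		nanosleep(&ts, NULL);	/* poll every 2ms, like the driver */
		if (time(NULL) > deadline) {
			timed_out = true;	/* give up and report it */
			break;
		}
	}
	printf("timed out: %s\n", timed_out ? "yes" : "no");
	return 0;
}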
/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fix up the temporary rpi assignments.
 **/
3060 lpfc_sli4_node_prep(struct lpfc_hba
*phba
)
3062 struct lpfc_nodelist
*ndlp
, *next_ndlp
;
3063 struct lpfc_vport
**vports
;
3066 if (phba
->sli_rev
!= LPFC_SLI_REV4
)
3069 vports
= lpfc_create_vport_work_array(phba
);
3073 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
3074 if (vports
[i
]->load_flag
& FC_UNLOADING
)
3077 list_for_each_entry_safe(ndlp
, next_ndlp
,
3078 &vports
[i
]->fc_nodes
,
3080 rpi
= lpfc_sli4_alloc_rpi(phba
);
3081 if (rpi
== LPFC_RPI_ALLOC_ERROR
) {
3082 /* TODO print log? */
3085 ndlp
->nlp_rpi
= rpi
;
3086 lpfc_printf_vlog(ndlp
->vport
, KERN_INFO
,
3087 LOG_NODE
| LOG_DISCOVERY
,
3088 "0009 Assign RPI x%x to ndlp x%px "
3089 "DID:x%06x flg:x%x\n",
3090 ndlp
->nlp_rpi
, ndlp
, ndlp
->nlp_DID
,
3094 lpfc_destroy_vport_work_array(phba
, vports
);
/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 * to the expedite pool. Mark them as expedite.
 **/
3104 static void lpfc_create_expedite_pool(struct lpfc_hba
*phba
)
3106 struct lpfc_sli4_hdw_queue
*qp
;
3107 struct lpfc_io_buf
*lpfc_ncmd
;
3108 struct lpfc_io_buf
*lpfc_ncmd_next
;
3109 struct lpfc_epd_pool
*epd_pool
;
3110 unsigned long iflag
;
3112 epd_pool
= &phba
->epd_pool
;
3113 qp
= &phba
->sli4_hba
.hdwq
[0];
3115 spin_lock_init(&epd_pool
->lock
);
3116 spin_lock_irqsave(&qp
->io_buf_list_put_lock
, iflag
);
3117 spin_lock(&epd_pool
->lock
);
3118 INIT_LIST_HEAD(&epd_pool
->list
);
3119 list_for_each_entry_safe(lpfc_ncmd
, lpfc_ncmd_next
,
3120 &qp
->lpfc_io_buf_list_put
, list
) {
3121 list_move_tail(&lpfc_ncmd
->list
, &epd_pool
->list
);
3122 lpfc_ncmd
->expedite
= true;
3125 if (epd_pool
->count
>= XRI_BATCH
)
3128 spin_unlock(&epd_pool
->lock
);
3129 spin_unlock_irqrestore(&qp
->io_buf_list_put_lock
, iflag
);
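/*
 * Illustrative userspace sketch (not driver code): the list_move_tail()
 * batching pattern used above -- take up to one batch of entries off one
 * list and append them to another.  The tiny dnode type below is a
 * stand-in for the kernel's struct list_head.
 */
#include <stdio.h>

struct dnode {
	int xri;
	struct dnode *prev, *next;
};

static void dlist_init(struct dnode *head)
{
	head->prev = head->next = head;
}

static void dlist_add_tail(struct dnode *n, struct dnode *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void dlist_move_tail(struct dnode *n, struct dnode *head)
{
	n->prev->next = n->next;	/* unlink from current list */
	n->next->prev = n->prev;
	dlist_add_tail(n, head);	/* append to destination */
}

int main(void)
{
	struct dnode put_list, expedite_pool, bufs[8];
	struct dnode *n, *next;
	int i, moved = 0, batch = 4;	/* stand-in for XRI_BATCH */

	dlist_init(&put_list);
	dlist_init(&expedite_pool);
	for (i = 0; i < 8; i++) {
		bufs[i].xri = i;
		dlist_add_tail(&bufs[i], &put_list);
	}

	/* move at most one batch over to the expedite pool */
	for (n = put_list.next; n != &put_list && moved < batch; n = next) {
		next = n->next;
		dlist_move_tail(n, &expedite_pool);
		moved++;
	}

	for (n = expedite_pool.next; n != &expedite_pool; n = n->next)
		printf("expedite xri %d\n", n->xri);
	return 0;
}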
/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
 * of HWQ 0. Clear the mark.
 **/
3139 static void lpfc_destroy_expedite_pool(struct lpfc_hba
*phba
)
3141 struct lpfc_sli4_hdw_queue
*qp
;
3142 struct lpfc_io_buf
*lpfc_ncmd
;
3143 struct lpfc_io_buf
*lpfc_ncmd_next
;
3144 struct lpfc_epd_pool
*epd_pool
;
3145 unsigned long iflag
;
3147 epd_pool
= &phba
->epd_pool
;
3148 qp
= &phba
->sli4_hba
.hdwq
[0];
3150 spin_lock_irqsave(&qp
->io_buf_list_put_lock
, iflag
);
3151 spin_lock(&epd_pool
->lock
);
3152 list_for_each_entry_safe(lpfc_ncmd
, lpfc_ncmd_next
,
3153 &epd_pool
->list
, list
) {
3154 list_move_tail(&lpfc_ncmd
->list
,
3155 &qp
->lpfc_io_buf_list_put
);
		lpfc_ncmd->expedite = false;
3160 spin_unlock(&epd_pool
->lock
);
3161 spin_unlock_irqrestore(&qp
->io_buf_list_put_lock
, iflag
);
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private XRI pools for each HWQ,
 * then moves XRIs from lpfc_io_buf_list_put to the public pool. High and
 * low watermarks for the private pool are also set here.
 **/
3172 void lpfc_create_multixri_pools(struct lpfc_hba
*phba
)
3177 struct lpfc_io_buf
*lpfc_ncmd
;
3178 struct lpfc_io_buf
*lpfc_ncmd_next
;
3179 unsigned long iflag
;
3180 struct lpfc_sli4_hdw_queue
*qp
;
3181 struct lpfc_multixri_pool
*multixri_pool
;
3182 struct lpfc_pbl_pool
*pbl_pool
;
3183 struct lpfc_pvt_pool
*pvt_pool
;
3185 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
3186 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3187 phba
->cfg_hdw_queue
, phba
->sli4_hba
.num_present_cpu
,
3188 phba
->sli4_hba
.io_xri_cnt
);
3190 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
)
3191 lpfc_create_expedite_pool(phba
);
3193 hwq_count
= phba
->cfg_hdw_queue
;
3194 count_per_hwq
= phba
->sli4_hba
.io_xri_cnt
/ hwq_count
;
3196 for (i
= 0; i
< hwq_count
; i
++) {
3197 multixri_pool
= kzalloc(sizeof(*multixri_pool
), GFP_KERNEL
);
3199 if (!multixri_pool
) {
3200 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
3201 "1238 Failed to allocate memory for "
3204 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
)
3205 lpfc_destroy_expedite_pool(phba
);
3209 qp
= &phba
->sli4_hba
.hdwq
[j
];
3210 kfree(qp
->p_multixri_pool
);
3213 phba
->cfg_xri_rebalancing
= 0;
3217 qp
= &phba
->sli4_hba
.hdwq
[i
];
3218 qp
->p_multixri_pool
= multixri_pool
;
3220 multixri_pool
->xri_limit
= count_per_hwq
;
3221 multixri_pool
->rrb_next_hwqid
= i
;
3223 /* Deal with public free xri pool */
3224 pbl_pool
= &multixri_pool
->pbl_pool
;
3225 spin_lock_init(&pbl_pool
->lock
);
3226 spin_lock_irqsave(&qp
->io_buf_list_put_lock
, iflag
);
3227 spin_lock(&pbl_pool
->lock
);
3228 INIT_LIST_HEAD(&pbl_pool
->list
);
3229 list_for_each_entry_safe(lpfc_ncmd
, lpfc_ncmd_next
,
3230 &qp
->lpfc_io_buf_list_put
, list
) {
3231 list_move_tail(&lpfc_ncmd
->list
, &pbl_pool
->list
);
3235 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
3236 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3237 pbl_pool
->count
, i
);
3238 spin_unlock(&pbl_pool
->lock
);
3239 spin_unlock_irqrestore(&qp
->io_buf_list_put_lock
, iflag
);
3241 /* Deal with private free xri pool */
3242 pvt_pool
= &multixri_pool
->pvt_pool
;
3243 pvt_pool
->high_watermark
= multixri_pool
->xri_limit
/ 2;
3244 pvt_pool
->low_watermark
= XRI_BATCH
;
3245 spin_lock_init(&pvt_pool
->lock
);
3246 spin_lock_irqsave(&pvt_pool
->lock
, iflag
);
3247 INIT_LIST_HEAD(&pvt_pool
->list
);
3248 pvt_pool
->count
= 0;
3249 spin_unlock_irqrestore(&pvt_pool
->lock
, iflag
);
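/*
 * Illustrative userspace sketch (not driver code): the per-HWQ sizing
 * arithmetic the routine above performs -- split the available XRIs evenly
 * across hardware queues, then derive the private pool's high and low
 * watermarks from that per-queue limit.  XRI_BATCH is assumed to be 8 here
 * purely as a stand-in value.
 */
#include <stdio.h>

#define XRI_BATCH	8	/* assumed batch size for this sketch */

int main(void)
{
	int io_xri_cnt = 2048;	/* made-up total of usable XRIs */
	int hwq_count = 16;	/* made-up number of hardware queues */
	int count_per_hwq = io_xri_cnt / hwq_count;
	int xri_limit = count_per_hwq;
	int high_watermark = xri_limit / 2;	/* pvt pool may grow to this */
	int low_watermark = XRI_BATCH;		/* refill when it drops here */

	printf("per-HWQ limit %d, high %d, low %d\n",
	       xri_limit, high_watermark, low_watermark);
	return 0;
}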
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the public/private pools to
 * lpfc_io_buf_list_put.
 **/
3259 static void lpfc_destroy_multixri_pools(struct lpfc_hba
*phba
)
3263 struct lpfc_io_buf
*lpfc_ncmd
;
3264 struct lpfc_io_buf
*lpfc_ncmd_next
;
3265 unsigned long iflag
;
3266 struct lpfc_sli4_hdw_queue
*qp
;
3267 struct lpfc_multixri_pool
*multixri_pool
;
3268 struct lpfc_pbl_pool
*pbl_pool
;
3269 struct lpfc_pvt_pool
*pvt_pool
;
3271 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
)
3272 lpfc_destroy_expedite_pool(phba
);
3274 if (!(phba
->pport
->load_flag
& FC_UNLOADING
))
3275 lpfc_sli_flush_io_rings(phba
);
3277 hwq_count
= phba
->cfg_hdw_queue
;
3279 for (i
= 0; i
< hwq_count
; i
++) {
3280 qp
= &phba
->sli4_hba
.hdwq
[i
];
3281 multixri_pool
= qp
->p_multixri_pool
;
3285 qp
->p_multixri_pool
= NULL
;
3287 spin_lock_irqsave(&qp
->io_buf_list_put_lock
, iflag
);
3289 /* Deal with public free xri pool */
3290 pbl_pool
= &multixri_pool
->pbl_pool
;
3291 spin_lock(&pbl_pool
->lock
);
3293 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
3294 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3295 pbl_pool
->count
, i
);
3297 list_for_each_entry_safe(lpfc_ncmd
, lpfc_ncmd_next
,
3298 &pbl_pool
->list
, list
) {
3299 list_move_tail(&lpfc_ncmd
->list
,
3300 &qp
->lpfc_io_buf_list_put
);
3305 INIT_LIST_HEAD(&pbl_pool
->list
);
3306 pbl_pool
->count
= 0;
3308 spin_unlock(&pbl_pool
->lock
);
3310 /* Deal with private free xri pool */
3311 pvt_pool
= &multixri_pool
->pvt_pool
;
3312 spin_lock(&pvt_pool
->lock
);
3314 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
3315 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3316 pvt_pool
->count
, i
);
3318 list_for_each_entry_safe(lpfc_ncmd
, lpfc_ncmd_next
,
3319 &pvt_pool
->list
, list
) {
3320 list_move_tail(&lpfc_ncmd
->list
,
3321 &qp
->lpfc_io_buf_list_put
);
3326 INIT_LIST_HEAD(&pvt_pool
->list
);
3327 pvt_pool
->count
= 0;
3329 spin_unlock(&pvt_pool
->lock
);
3330 spin_unlock_irqrestore(&qp
->io_buf_list_put_lock
, iflag
);
3332 kfree(multixri_pool
);
/**
 * lpfc_online - Initialize and bring an HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings an HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 **/
3349 lpfc_online(struct lpfc_hba
*phba
)
3351 struct lpfc_vport
*vport
;
3352 struct lpfc_vport
**vports
;
3354 bool vpis_cleared
= false;
3358 vport
= phba
->pport
;
3360 if (!(vport
->fc_flag
& FC_OFFLINE_MODE
))
3363 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
3364 "0458 Bring Adapter online\n");
3366 lpfc_block_mgmt_io(phba
, LPFC_MBX_WAIT
);
3368 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
3369 if (lpfc_sli4_hba_setup(phba
)) { /* Initialize SLI4 HBA */
3370 lpfc_unblock_mgmt_io(phba
);
3373 spin_lock_irq(&phba
->hbalock
);
3374 if (!phba
->sli4_hba
.max_cfg_param
.vpi_used
)
3375 vpis_cleared
= true;
3376 spin_unlock_irq(&phba
->hbalock
);
3378 /* Reestablish the local initiator port.
3379 * The offline process destroyed the previous lport.
3381 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
&&
3382 !phba
->nvmet_support
) {
3383 error
= lpfc_nvme_create_localport(phba
->pport
);
3385 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
3386 "6132 NVME restore reg failed "
3387 "on nvmei error x%x\n", error
);
3390 lpfc_sli_queue_init(phba
);
3391 if (lpfc_sli_hba_setup(phba
)) { /* Initialize SLI2/SLI3 HBA */
3392 lpfc_unblock_mgmt_io(phba
);
3397 vports
= lpfc_create_vport_work_array(phba
);
3398 if (vports
!= NULL
) {
3399 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
3400 struct Scsi_Host
*shost
;
3401 shost
= lpfc_shost_from_vport(vports
[i
]);
3402 spin_lock_irq(shost
->host_lock
);
3403 vports
[i
]->fc_flag
&= ~FC_OFFLINE_MODE
;
3404 if (phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
)
3405 vports
[i
]->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
3406 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
3407 vports
[i
]->fc_flag
|= FC_VPORT_NEEDS_INIT_VPI
;
3408 if ((vpis_cleared
) &&
3409 (vports
[i
]->port_type
!=
3410 LPFC_PHYSICAL_PORT
))
3413 spin_unlock_irq(shost
->host_lock
);
3416 lpfc_destroy_vport_work_array(phba
, vports
);
3418 if (phba
->cfg_xri_rebalancing
)
3419 lpfc_create_multixri_pools(phba
);
3421 lpfc_cpuhp_add(phba
);
3423 lpfc_unblock_mgmt_io(phba
);
/**
 * lpfc_unblock_mgmt_io - Mark an HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks an HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space
 * access to the HBA, whether from the sysfs interface or the libdfc
 * interface, will be allowed. The HBA is set to block the management
 * interface when the driver prepares the HBA interface for online or
 * offline and then set to unblock the management interface afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_offline_prep - Prepare an HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare an HBA to be brought offline. It
 * unregisters the login for all nodes on all vports and flushes the mailbox
 * queue to make the HBA ready to be brought offline.
 **/
3458 lpfc_offline_prep(struct lpfc_hba
*phba
, int mbx_action
)
3460 struct lpfc_vport
*vport
= phba
->pport
;
3461 struct lpfc_nodelist
*ndlp
, *next_ndlp
;
3462 struct lpfc_vport
**vports
;
3463 struct Scsi_Host
*shost
;
3466 if (vport
->fc_flag
& FC_OFFLINE_MODE
)
3469 lpfc_block_mgmt_io(phba
, mbx_action
);
3471 lpfc_linkdown(phba
);
3473 /* Issue an unreg_login to all nodes on all vports */
3474 vports
= lpfc_create_vport_work_array(phba
);
3475 if (vports
!= NULL
) {
3476 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
3477 if (vports
[i
]->load_flag
& FC_UNLOADING
)
3479 shost
= lpfc_shost_from_vport(vports
[i
]);
3480 spin_lock_irq(shost
->host_lock
);
3481 vports
[i
]->vpi_state
&= ~LPFC_VPI_REGISTERED
;
3482 vports
[i
]->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
3483 vports
[i
]->fc_flag
&= ~FC_VFI_REGISTERED
;
3484 spin_unlock_irq(shost
->host_lock
);
3486 shost
= lpfc_shost_from_vport(vports
[i
]);
3487 list_for_each_entry_safe(ndlp
, next_ndlp
,
3488 &vports
[i
]->fc_nodes
,
3490 if (ndlp
->nlp_state
== NLP_STE_UNUSED_NODE
) {
3491 /* Driver must assume RPI is invalid for
3492 * any unused or inactive node.
3494 ndlp
->nlp_rpi
= LPFC_RPI_ALLOC_ERROR
;
3498 spin_lock_irq(&ndlp
->lock
);
3499 ndlp
->nlp_flag
&= ~NLP_NPR_ADISC
;
3500 spin_unlock_irq(&ndlp
->lock
);
3502 * Whenever an SLI4 port goes offline, free the
3503 * RPI. Get a new RPI when the adapter port
3504 * comes back online.
3506 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
3507 lpfc_printf_vlog(vports
[i
], KERN_INFO
,
3508 LOG_NODE
| LOG_DISCOVERY
,
3509 "0011 Free RPI x%x on "
3510 "ndlp: %p did x%x\n",
3511 ndlp
->nlp_rpi
, ndlp
,
3513 lpfc_sli4_free_rpi(phba
, ndlp
->nlp_rpi
);
3514 ndlp
->nlp_rpi
= LPFC_RPI_ALLOC_ERROR
;
3516 lpfc_unreg_rpi(vports
[i
], ndlp
);
3518 if (ndlp
->nlp_type
& NLP_FABRIC
) {
3519 lpfc_disc_state_machine(vports
[i
], ndlp
,
3520 NULL
, NLP_EVT_DEVICE_RECOVERY
);
3522 /* Don't remove the node unless the
3523 * has been unregistered with the
3524 * transport. If so, let dev_loss
3525 * take care of the node.
3527 if (!(ndlp
->fc4_xpt_flags
&
3528 (NVME_XPT_REGD
| SCSI_XPT_REGD
)))
3529 lpfc_disc_state_machine
3537 lpfc_destroy_vport_work_array(phba
, vports
);
3539 lpfc_sli_mbox_sys_shutdown(phba
, mbx_action
);
3542 flush_workqueue(phba
->wq
);
/**
 * lpfc_offline - Bring an HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings an HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
3554 lpfc_offline(struct lpfc_hba
*phba
)
3556 struct Scsi_Host
*shost
;
3557 struct lpfc_vport
**vports
;
3560 if (phba
->pport
->fc_flag
& FC_OFFLINE_MODE
)
3563 /* stop port and all timers associated with this hba */
3564 lpfc_stop_port(phba
);
3566 /* Tear down the local and target port registrations. The
3567 * nvme transports need to cleanup.
3569 lpfc_nvmet_destroy_targetport(phba
);
3570 lpfc_nvme_destroy_localport(phba
->pport
);
3572 vports
= lpfc_create_vport_work_array(phba
);
3574 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++)
3575 lpfc_stop_vport_timers(vports
[i
]);
3576 lpfc_destroy_vport_work_array(phba
, vports
);
3577 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
3578 "0460 Bring Adapter offline\n");
3579 /* Bring down the SLI Layer and cleanup. The HBA is offline
3581 lpfc_sli_hba_down(phba
);
3582 spin_lock_irq(&phba
->hbalock
);
3584 spin_unlock_irq(&phba
->hbalock
);
3585 vports
= lpfc_create_vport_work_array(phba
);
3587 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
3588 shost
= lpfc_shost_from_vport(vports
[i
]);
3589 spin_lock_irq(shost
->host_lock
);
3590 vports
[i
]->work_port_events
= 0;
3591 vports
[i
]->fc_flag
|= FC_OFFLINE_MODE
;
3592 spin_unlock_irq(shost
->host_lock
);
3594 lpfc_destroy_vport_work_array(phba
, vports
);
3595 __lpfc_cpuhp_remove(phba
);
3597 if (phba
->cfg_xri_rebalancing
)
3598 lpfc_destroy_multixri_pools(phba
);
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the SCSI buffers and IOCBs held on the driver
 * lists and returns them to the kernel. It is called from
 * lpfc_pci_remove_one to free the internal resources before the device
 * is removed from the system.
 **/
3610 lpfc_scsi_free(struct lpfc_hba
*phba
)
3612 struct lpfc_io_buf
*sb
, *sb_next
;
3614 if (!(phba
->cfg_enable_fc4_type
& LPFC_ENABLE_FCP
))
3617 spin_lock_irq(&phba
->hbalock
);
3619 /* Release all the lpfc_scsi_bufs maintained by this host. */
3621 spin_lock(&phba
->scsi_buf_list_put_lock
);
3622 list_for_each_entry_safe(sb
, sb_next
, &phba
->lpfc_scsi_buf_list_put
,
3624 list_del(&sb
->list
);
3625 dma_pool_free(phba
->lpfc_sg_dma_buf_pool
, sb
->data
,
3628 phba
->total_scsi_bufs
--;
3630 spin_unlock(&phba
->scsi_buf_list_put_lock
);
3632 spin_lock(&phba
->scsi_buf_list_get_lock
);
3633 list_for_each_entry_safe(sb
, sb_next
, &phba
->lpfc_scsi_buf_list_get
,
3635 list_del(&sb
->list
);
3636 dma_pool_free(phba
->lpfc_sg_dma_buf_pool
, sb
->data
,
3639 phba
->total_scsi_bufs
--;
3641 spin_unlock(&phba
->scsi_buf_list_get_lock
);
3642 spin_unlock_irq(&phba
->hbalock
);
/**
 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the IO buffers and IOCBs held on the driver
 * lists and returns them to the kernel. It is called from
 * lpfc_pci_remove_one to free the internal resources before the device
 * is removed from the system.
 **/
3654 lpfc_io_free(struct lpfc_hba
*phba
)
3656 struct lpfc_io_buf
*lpfc_ncmd
, *lpfc_ncmd_next
;
3657 struct lpfc_sli4_hdw_queue
*qp
;
3660 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
3661 qp
= &phba
->sli4_hba
.hdwq
[idx
];
3662 /* Release all the lpfc_nvme_bufs maintained by this host. */
3663 spin_lock(&qp
->io_buf_list_put_lock
);
3664 list_for_each_entry_safe(lpfc_ncmd
, lpfc_ncmd_next
,
3665 &qp
->lpfc_io_buf_list_put
,
3667 list_del(&lpfc_ncmd
->list
);
3669 dma_pool_free(phba
->lpfc_sg_dma_buf_pool
,
3670 lpfc_ncmd
->data
, lpfc_ncmd
->dma_handle
);
3671 if (phba
->cfg_xpsgl
&& !phba
->nvmet_support
)
3672 lpfc_put_sgl_per_hdwq(phba
, lpfc_ncmd
);
3673 lpfc_put_cmd_rsp_buf_per_hdwq(phba
, lpfc_ncmd
);
3675 qp
->total_io_bufs
--;
3677 spin_unlock(&qp
->io_buf_list_put_lock
);
3679 spin_lock(&qp
->io_buf_list_get_lock
);
3680 list_for_each_entry_safe(lpfc_ncmd
, lpfc_ncmd_next
,
3681 &qp
->lpfc_io_buf_list_get
,
3683 list_del(&lpfc_ncmd
->list
);
3685 dma_pool_free(phba
->lpfc_sg_dma_buf_pool
,
3686 lpfc_ncmd
->data
, lpfc_ncmd
->dma_handle
);
3687 if (phba
->cfg_xpsgl
&& !phba
->nvmet_support
)
3688 lpfc_put_sgl_per_hdwq(phba
, lpfc_ncmd
);
3689 lpfc_put_cmd_rsp_buf_per_hdwq(phba
, lpfc_ncmd
);
3691 qp
->total_io_bufs
--;
3693 spin_unlock(&qp
->io_buf_list_get_lock
);
/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
3710 lpfc_sli4_els_sgl_update(struct lpfc_hba
*phba
)
3712 struct lpfc_sglq
*sglq_entry
= NULL
, *sglq_entry_next
= NULL
;
3713 uint16_t i
, lxri
, xri_cnt
, els_xri_cnt
;
3714 LIST_HEAD(els_sgl_list
);
3718 * update on pci function's els xri-sgl list
3720 els_xri_cnt
= lpfc_sli4_get_els_iocb_cnt(phba
);
3722 if (els_xri_cnt
> phba
->sli4_hba
.els_xri_cnt
) {
3723 /* els xri-sgl expanded */
3724 xri_cnt
= els_xri_cnt
- phba
->sli4_hba
.els_xri_cnt
;
3725 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
3726 "3157 ELS xri-sgl count increased from "
3727 "%d to %d\n", phba
->sli4_hba
.els_xri_cnt
,
3729 /* allocate the additional els sgls */
3730 for (i
= 0; i
< xri_cnt
; i
++) {
3731 sglq_entry
= kzalloc(sizeof(struct lpfc_sglq
),
3733 if (sglq_entry
== NULL
) {
3734 lpfc_printf_log(phba
, KERN_ERR
,
3736 "2562 Failure to allocate an "
3737 "ELS sgl entry:%d\n", i
);
3741 sglq_entry
->buff_type
= GEN_BUFF_TYPE
;
3742 sglq_entry
->virt
= lpfc_mbuf_alloc(phba
, 0,
3744 if (sglq_entry
->virt
== NULL
) {
3746 lpfc_printf_log(phba
, KERN_ERR
,
3748 "2563 Failure to allocate an "
3749 "ELS mbuf:%d\n", i
);
3753 sglq_entry
->sgl
= sglq_entry
->virt
;
3754 memset(sglq_entry
->sgl
, 0, LPFC_BPL_SIZE
);
3755 sglq_entry
->state
= SGL_FREED
;
3756 list_add_tail(&sglq_entry
->list
, &els_sgl_list
);
3758 spin_lock_irq(&phba
->hbalock
);
3759 spin_lock(&phba
->sli4_hba
.sgl_list_lock
);
3760 list_splice_init(&els_sgl_list
,
3761 &phba
->sli4_hba
.lpfc_els_sgl_list
);
3762 spin_unlock(&phba
->sli4_hba
.sgl_list_lock
);
3763 spin_unlock_irq(&phba
->hbalock
);
3764 } else if (els_xri_cnt
< phba
->sli4_hba
.els_xri_cnt
) {
		/* els xri-sgl shrunk */
3766 xri_cnt
= phba
->sli4_hba
.els_xri_cnt
- els_xri_cnt
;
3767 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
3768 "3158 ELS xri-sgl count decreased from "
3769 "%d to %d\n", phba
->sli4_hba
.els_xri_cnt
,
3771 spin_lock_irq(&phba
->hbalock
);
3772 spin_lock(&phba
->sli4_hba
.sgl_list_lock
);
3773 list_splice_init(&phba
->sli4_hba
.lpfc_els_sgl_list
,
3775 /* release extra els sgls from list */
3776 for (i
= 0; i
< xri_cnt
; i
++) {
3777 list_remove_head(&els_sgl_list
,
3778 sglq_entry
, struct lpfc_sglq
, list
);
3780 __lpfc_mbuf_free(phba
, sglq_entry
->virt
,
3785 list_splice_init(&els_sgl_list
,
3786 &phba
->sli4_hba
.lpfc_els_sgl_list
);
3787 spin_unlock(&phba
->sli4_hba
.sgl_list_lock
);
3788 spin_unlock_irq(&phba
->hbalock
);
3790 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
3791 "3163 ELS xri-sgl count unchanged: %d\n",
3793 phba
->sli4_hba
.els_xri_cnt
= els_xri_cnt
;
3795 /* update xris to els sgls on the list */
3797 sglq_entry_next
= NULL
;
3798 list_for_each_entry_safe(sglq_entry
, sglq_entry_next
,
3799 &phba
->sli4_hba
.lpfc_els_sgl_list
, list
) {
3800 lxri
= lpfc_sli4_next_xritag(phba
);
3801 if (lxri
== NO_XRI
) {
3802 lpfc_printf_log(phba
, KERN_ERR
,
3804 "2400 Failed to allocate xri for "
3809 sglq_entry
->sli4_lxritag
= lxri
;
3810 sglq_entry
->sli4_xritag
= phba
->sli4_hba
.xri_ids
[lxri
];
3815 lpfc_free_els_sgl_list(phba
);
/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
3832 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba
*phba
)
3834 struct lpfc_sglq
*sglq_entry
= NULL
, *sglq_entry_next
= NULL
;
3835 uint16_t i
, lxri
, xri_cnt
, els_xri_cnt
;
3836 uint16_t nvmet_xri_cnt
;
3837 LIST_HEAD(nvmet_sgl_list
);
3841 * update on pci function's nvmet xri-sgl list
3843 els_xri_cnt
= lpfc_sli4_get_els_iocb_cnt(phba
);
3845 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3846 nvmet_xri_cnt
= phba
->sli4_hba
.max_cfg_param
.max_xri
- els_xri_cnt
;
3847 if (nvmet_xri_cnt
> phba
->sli4_hba
.nvmet_xri_cnt
) {
3848 /* els xri-sgl expanded */
3849 xri_cnt
= nvmet_xri_cnt
- phba
->sli4_hba
.nvmet_xri_cnt
;
3850 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
3851 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3852 phba
->sli4_hba
.nvmet_xri_cnt
, nvmet_xri_cnt
);
3853 /* allocate the additional nvmet sgls */
3854 for (i
= 0; i
< xri_cnt
; i
++) {
3855 sglq_entry
= kzalloc(sizeof(struct lpfc_sglq
),
3857 if (sglq_entry
== NULL
) {
3858 lpfc_printf_log(phba
, KERN_ERR
,
3860 "6303 Failure to allocate an "
3861 "NVMET sgl entry:%d\n", i
);
3865 sglq_entry
->buff_type
= NVMET_BUFF_TYPE
;
3866 sglq_entry
->virt
= lpfc_nvmet_buf_alloc(phba
, 0,
3868 if (sglq_entry
->virt
== NULL
) {
3870 lpfc_printf_log(phba
, KERN_ERR
,
3872 "6304 Failure to allocate an "
3873 "NVMET buf:%d\n", i
);
3877 sglq_entry
->sgl
= sglq_entry
->virt
;
3878 memset(sglq_entry
->sgl
, 0,
3879 phba
->cfg_sg_dma_buf_size
);
3880 sglq_entry
->state
= SGL_FREED
;
3881 list_add_tail(&sglq_entry
->list
, &nvmet_sgl_list
);
3883 spin_lock_irq(&phba
->hbalock
);
3884 spin_lock(&phba
->sli4_hba
.sgl_list_lock
);
3885 list_splice_init(&nvmet_sgl_list
,
3886 &phba
->sli4_hba
.lpfc_nvmet_sgl_list
);
3887 spin_unlock(&phba
->sli4_hba
.sgl_list_lock
);
3888 spin_unlock_irq(&phba
->hbalock
);
3889 } else if (nvmet_xri_cnt
< phba
->sli4_hba
.nvmet_xri_cnt
) {
3890 /* nvmet xri-sgl shrunk */
3891 xri_cnt
= phba
->sli4_hba
.nvmet_xri_cnt
- nvmet_xri_cnt
;
3892 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
3893 "6305 NVMET xri-sgl count decreased from "
3894 "%d to %d\n", phba
->sli4_hba
.nvmet_xri_cnt
,
3896 spin_lock_irq(&phba
->hbalock
);
3897 spin_lock(&phba
->sli4_hba
.sgl_list_lock
);
3898 list_splice_init(&phba
->sli4_hba
.lpfc_nvmet_sgl_list
,
3900 /* release extra nvmet sgls from list */
3901 for (i
= 0; i
< xri_cnt
; i
++) {
3902 list_remove_head(&nvmet_sgl_list
,
3903 sglq_entry
, struct lpfc_sglq
, list
);
3905 lpfc_nvmet_buf_free(phba
, sglq_entry
->virt
,
3910 list_splice_init(&nvmet_sgl_list
,
3911 &phba
->sli4_hba
.lpfc_nvmet_sgl_list
);
3912 spin_unlock(&phba
->sli4_hba
.sgl_list_lock
);
3913 spin_unlock_irq(&phba
->hbalock
);
3915 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
3916 "6306 NVMET xri-sgl count unchanged: %d\n",
3918 phba
->sli4_hba
.nvmet_xri_cnt
= nvmet_xri_cnt
;
3920 /* update xris to nvmet sgls on the list */
3922 sglq_entry_next
= NULL
;
3923 list_for_each_entry_safe(sglq_entry
, sglq_entry_next
,
3924 &phba
->sli4_hba
.lpfc_nvmet_sgl_list
, list
) {
3925 lxri
= lpfc_sli4_next_xritag(phba
);
3926 if (lxri
== NO_XRI
) {
3927 lpfc_printf_log(phba
, KERN_ERR
,
3929 "6307 Failed to allocate xri for "
3934 sglq_entry
->sli4_lxritag
= lxri
;
3935 sglq_entry
->sli4_xritag
= phba
->sli4_hba
.xri_ids
[lxri
];
3940 lpfc_free_nvmet_sgl_list(phba
);
int
lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
{
	LIST_HEAD(blist);
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_io_buf *iobufp, *prev_iobufp;
	int idx, cnt, xri, inserted;

	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		spin_lock_irq(&qp->io_buf_list_get_lock);
		spin_lock(&qp->io_buf_list_put_lock);

		/* Take everything off the get and put lists */
		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
		list_splice(&qp->lpfc_io_buf_list_put, &blist);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
		cnt += qp->get_io_bufs + qp->put_io_bufs;
		qp->get_io_bufs = 0;
		qp->put_io_bufs = 0;
		qp->total_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock_irq(&qp->io_buf_list_get_lock);
	}

	/*
	 * Take IO buffers off blist and put on cbuf sorted by XRI.
	 * This is because POST_SGL takes a sequential range of XRIs
	 * to post to the firmware.
	 */
	for (idx = 0; idx < cnt; idx++) {
		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
		if (!lpfc_cmd)
			return cnt;
		if (idx == 0) {
			list_add_tail(&lpfc_cmd->list, cbuf);
			continue;
		}
		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
		inserted = 0;
		prev_iobufp = NULL;
		list_for_each_entry(iobufp, cbuf, list) {
			if (xri < iobufp->cur_iocbq.sli4_xritag) {
				if (prev_iobufp)
					list_add(&lpfc_cmd->list,
						 &prev_iobufp->list);
				else
					list_add(&lpfc_cmd->list, cbuf);
				inserted = 1;
				break;
			}
			prev_iobufp = iobufp;
		}
		if (!inserted)
			list_add_tail(&lpfc_cmd->list, cbuf);
	}
	return cnt;
}
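
/*
 * Example: if the hardware queue lists hold buffers with XRIs 12, 7 and 9,
 * the insertion loop above leaves cbuf ordered 7 -> 9 -> 12, so a subsequent
 * SGL block post can walk a monotonically increasing XRI range.
 */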
int
lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	int idx, cnt;

	qp = phba->sli4_hba.hdwq;
	cnt = 0;
	while (!list_empty(cbuf)) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			list_remove_head(cbuf, lpfc_cmd,
					 struct lpfc_io_buf, list);
			if (!lpfc_cmd)
				return cnt;
			cnt++;
			qp = &phba->sli4_hba.hdwq[idx];
			lpfc_cmd->hdwq_no = idx;
			lpfc_cmd->hdwq = qp;
			lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
			lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
			spin_lock(&qp->io_buf_list_put_lock);
			list_add_tail(&lpfc_cmd->list,
				      &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			qp->total_io_bufs++;
			spin_unlock(&qp->io_buf_list_put_lock);
		}
	}
	return cnt;
}
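
/*
 * Note: the inner for loop above deals buffers back one per hardware queue,
 * so a flushed pool is redistributed round-robin across all cfg_hdw_queue
 * queues rather than refilling a single queue first.
 */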
/**
 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc, cnt;

	/*
	 * update on pci function's allocated nvme xri-sgl list
	 */

	/* maximum number of xris available for nvme buffers */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.io_xri_max = io_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated XRI sgl count:%d, "
			"maximum XRI count:%d\n",
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max);

	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
		/* max nvme xri shrunk below the allocated nvme buffers */
		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
					phba->sli4_hba.io_xri_max;
		/* release the extra allocated nvme buffers */
		for (i = 0; i < io_xri_cnt; i++) {
			list_remove_head(&io_sgl_list, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}

	/* update xris associated to remaining allocated nvme buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	phba->sli4_hba.io_xri_cnt = cnt;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &io_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	lpfc_io_free(phba);
	return rc;
}
/**
 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
 * @phba: Pointer to lpfc hba data structure.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates nvme buffers for device with SLI-4 interface spec,
 * the nvme buffer contains all the necessary information needed to initiate
 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of IO buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
int
lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(post_nblist);
	LIST_HEAD(nvme_nblist);

	phba->sli4_hba.io_xri_cnt = 0;
	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						  GFP_KERNEL,
						  &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}

		if (phba->cfg_xpsgl && !phba->nvmet_support) {
			INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
		} else {
			/*
			 * 4K Page alignment is CRITICAL to BlockGuard, double
			 * check to be sure.
			 */
			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    (((unsigned long)(lpfc_ncmd->data) &
			    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"3369 Memory alignment err: "
						"addr=%lx\n",
						(unsigned long)lpfc_ncmd->data);
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
				break;
			}
		}

		INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq = &lpfc_ncmd->cur_iocbq;

		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6121 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		pwqeq->sli4_lxritag = lxri;
		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		pwqeq->context1 = lpfc_ncmd;

		/* Initialize local short-hand pointers. */
		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
		spin_lock_init(&lpfc_ncmd->buf_lock);

		/* add the nvme buffer to a post list */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		phba->sli4_hba.io_xri_cnt++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocate %d out of %d requested new NVME "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_sli4_post_io_sgl_list(
				phba, &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;

	/* First get WWN of HBA instance */
	lpfc_read_nv(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6019 Mailbox failed , mbxCmd x%x "
				"READ_NV, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t) -1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
	/* wwn is WWPN of HBA instance */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		return be64_to_cpu(wwn);
	else
		return rol64(wwn, 32);
}
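
/*
 * Note on byte order: the WWPN is copied out of the READ_NV payload as a raw
 * byte stream.  The SLI4 path returns be64_to_cpu(), i.e. a full 64-bit byte
 * swap on little-endian hosts, while the SLI3 path returns rol64(wwn, 32),
 * which only exchanges the upper and lower 32-bit words of the value.
 */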
/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost = NULL;
	struct scsi_host_template *template;
	int error = 0;
	int i;
	uint64_t wwn;
	bool use_no_reset_hba = false;
	int rc;

	if (lpfc_no_hba_reset_cnt) {
		if (phba->sli_rev < LPFC_SLI_REV4 &&
		    dev == &phba->pcidev->dev) {
			/* Reset the port first */
			lpfc_sli_brdrestart(phba);
			rc = lpfc_sli_chipset_init(phba);
			if (rc)
				return NULL;
		}
		wwn = lpfc_get_wwpn(phba);
	}

	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
		if (wwn == lpfc_no_hba_reset[i]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6020 Setting use_no_reset port=%llx\n",
					wwn);
			use_no_reset_hba = true;
			break;
		}
	}

	/* Seed template for SCSI host registration */
	if (dev == &phba->pcidev->dev) {
		template = &phba->port_template;

		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
			/* Seed physical port template */
			memcpy(template, &lpfc_template, sizeof(*template));

			if (use_no_reset_hba)
				/* template is for a no reset SCSI Host */
				template->eh_host_reset_handler = NULL;

			/* Template for all vports this physical port creates */
			memcpy(&phba->vport_template, &lpfc_template,
			       sizeof(*template));
			phba->vport_template.shost_attrs = lpfc_vport_attrs;
			phba->vport_template.eh_bus_reset_handler = NULL;
			phba->vport_template.eh_host_reset_handler = NULL;
			phba->vport_template.vendor_id = 0;

			/* Initialize the host templates with updated value */
			if (phba->sli_rev == LPFC_SLI_REV4) {
				template->sg_tablesize = phba->cfg_scsi_seg_cnt;
				phba->vport_template.sg_tablesize =
					phba->cfg_scsi_seg_cnt;
			} else {
				template->sg_tablesize = phba->cfg_sg_seg_cnt;
				phba->vport_template.sg_tablesize =
					phba->cfg_sg_seg_cnt;
			}
		} else {
			/* NVMET is for physical port only */
			memcpy(template, &lpfc_template_nvme,
			       sizeof(*template));
		}
	} else {
		template = &phba->vport_template;
	}

	shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;
	lpfc_get_vport_cfgparam(vport);

	/* Adjust value in vport */
	vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;

	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (!phba->cfg_fcp_mq_threshold ||
		    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
			phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;

		shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
					    phba->cfg_fcp_mq_threshold);

		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;

		if (phba->cfg_xpsgl && !phba->nvmet_support)
			shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
		else
			shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
	} else
		/* SLI-3 has a limited number of hardware queues (3),
		 * thus there is only one for FCP processing.
		 */
		shost->nr_hw_queues = 1;

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9081 CreatePort TMPLATE type %x TBLsize %d "
			"SEGcnt %d/%d\n",
			vport->port_type, shost->sg_tablesize,
			phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);

	timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);

	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);

	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->port_list_lock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->port_list_lock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}
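
/*
 * Example sizing (values only illustrative): with cfg_hdw_queue = 16 and
 * cfg_fcp_mq_threshold left at 0, the threshold is clamped to 16, and on a
 * two-NUMA-node system nr_hw_queues becomes min(2 * 2, 16) = 4.  The clamp
 * and the min_t() above are what the SCSI midlayer actually sees.
 */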
/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
static void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_cleanup(vport);
}
/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}
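
/*
 * The ID returned here is purely a cookie; no pointer is stored against it
 * (idr_alloc() is called with a NULL entry and an unbounded range starting
 * at 0).  A matching idr_remove(&lpfc_hba_index, instance) elsewhere in the
 * driver is expected to release the ID when the adapter instance goes away.
 */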
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
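
/*
 * In short, the scan is reported complete when the port is READY with no
 * discovery or mailbox work outstanding, or unconditionally after 30 seconds,
 * or after 15 seconds with the link still down; a port with zero mapped
 * targets is given roughly 2 seconds before the scan is allowed to finish.
 */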
static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	fc_host_supported_speeds(shost) = 0;
	/*
	 * Avoid reporting supported link speed for FCoE as it can't be
	 * controlled via FCoE.
	 */
	if (phba->hba_flag & HBA_FCOE_MODE)
		return;

	if (phba->lmt & LMT_128Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
	if (phba->lmt & LMT_64Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
	if (phba->lmt & LMT_32Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
}
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				      sizeof fc_host_symbolic_name(shost));

	lpfc_host_supported_speeds_set(shost);

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when waiting for FCF table rediscover has timed
 * out. If new FCF record(s) has (have) been discovered during the wait
 * period, a new FCF event shall be added to the FCOE async event list, and
 * then the worker thread shall be woken up for processing from the worker
 * thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
	case LPFC_ASYNC_LINK_FAULT_LR_LRR:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0398 Unknown link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		break;
	}
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}
/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:  link_speed = 1000;  break;
		case LPFC_LINK_SPEED_2GHZ:  link_speed = 2000;  break;
		case LPFC_LINK_SPEED_4GHZ:  link_speed = 4000;  break;
		case LPFC_LINK_SPEED_8GHZ:  link_speed = 8000;  break;
		case LPFC_LINK_SPEED_10GHZ: link_speed = 10000; break;
		case LPFC_LINK_SPEED_16GHZ: link_speed = 16000; break;
		default: link_speed = 0; break;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
				phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:    port_speed = 0;      break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:  port_speed = 10;     break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS: port_speed = 100;    break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:   port_speed = 1000;   break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:  port_speed = 10000;  break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:  port_speed = 20000;  break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:  port_speed = 25000;  break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:  port_speed = 40000;  break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS: port_speed = 100000; break;
		default: port_speed = 0; break;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN: port_speed = 0;      break;
		case LPFC_FC_LA_SPEED_1G:      port_speed = 1000;   break;
		case LPFC_FC_LA_SPEED_2G:      port_speed = 2000;   break;
		case LPFC_FC_LA_SPEED_4G:      port_speed = 4000;   break;
		case LPFC_FC_LA_SPEED_8G:      port_speed = 8000;   break;
		case LPFC_FC_LA_SPEED_10G:     port_speed = 10000;  break;
		case LPFC_FC_LA_SPEED_16G:     port_speed = 16000;  break;
		case LPFC_FC_LA_SPEED_32G:     port_speed = 32000;  break;
		case LPFC_FC_LA_SPEED_64G:     port_speed = 64000;  break;
		case LPFC_FC_LA_SPEED_128G:    port_speed = 128000; break;
		default: port_speed = 0; break;
		}
		break;
	default:
		port_speed = 0;
		break;
	}
	return port_speed;
}
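
/*
 * For example, an FC link-attention ACQE (evt_code LPFC_TRAILER_CODE_FC)
 * carrying LPFC_FC_LA_SPEED_32G parses to 32000 Mbps above, while an unknown
 * speed code falls through to 0.
 */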
/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done processing the link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Read topology link speed.
 *
 * Return: link speed in terms of Read topology.
 **/
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:   port_speed = LPFC_LINK_SPEED_1GHZ;   break;
	case LPFC_FC_LA_SPEED_2G:   port_speed = LPFC_LINK_SPEED_2GHZ;   break;
	case LPFC_FC_LA_SPEED_4G:   port_speed = LPFC_LINK_SPEED_4GHZ;   break;
	case LPFC_FC_LA_SPEED_8G:   port_speed = LPFC_LINK_SPEED_8GHZ;   break;
	case LPFC_FC_LA_SPEED_16G:  port_speed = LPFC_LINK_SPEED_16GHZ;  break;
	case LPFC_FC_LA_SPEED_32G:  port_speed = LPFC_LINK_SPEED_32GHZ;  break;
	case LPFC_FC_LA_SPEED_64G:  port_speed = LPFC_LINK_SPEED_64GHZ;  break;
	case LPFC_FC_LA_SPEED_128G: port_speed = LPFC_LINK_SPEED_128GHZ; break;
	case LPFC_FC_LA_SPEED_256G: port_speed = LPFC_LINK_SPEED_256GHZ; break;
	default:
		port_speed = 0;
		break;
	}
	return port_speed;
}
#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx report an error */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
					phba,
					bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI-4: We have only 0xA error codes
				 * defined as of now. print an appropriate
				 * message in case driver needs to be updated.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}
/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
		lpfc_update_trunk_link_status(phba, acqe_fc);
		return;
	}

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_LINK_DOWN)
		phba->sli4_hba.link_state.logical_speed = 0;
	else if (!phba->sli4_hba.conf_trunk)
		phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done processing the link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
			phba->link_flag |= LS_MDS_LOOPBACK;
			break;
		default:
			break;
		}

		/* Initialize completion status */
		mb = &pmb->u.mb;
		mb->mbxStatus = MBX_SUCCESS;

		/* Parse port fault information field */
		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;

		if (phba->sli4_hba.link_state.status ==
		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
		} else {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_LINK_DOWN);
		}
		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int rc, i;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Type:%d, Event Data: x%08x "
			"x%08x x%08x x%08x\n", evt_type,
			acqe_sli->event_data1, acqe_sli->event_data2,
			acqe_sli->reserved, acqe_sli->trailer);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* get port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
		/* Misconfigured WWN. Reports that the SLI Port is configured
		 * to use FA-WWN, but the attached device doesn't support it.
		 * No driver action is required.
		 * Event Data1 - N.A, Event Data2 - N.A
		 */
		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
			     "2699 Misconfigured FA-WWN - Attached device does "
			     "not support FA-WWN\n");
		break;
	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
		/* EEPROM failure. No driver action is required */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2518 EEPROM failure - "
				"Event Data1: x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Unrecognized SLI event, type: 0x%x",
				evt_type);
		break;
	}
}
/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index,
				acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_TRACE_EVENT,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index,
				acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(&ndlp->lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_TRACE_EVENT,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change.  The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}
/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the async event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
				       iflags);

		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}

		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}

/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

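/*
 * Usage note: once lpfc_api_table_setup() returns, generic code paths invoke
 * the SLI-revision specific handlers indirectly through the jump table
 * populated here (see lpfc_init_api_table_setup() below), for example
 *
 *	rc = phba->lpfc_hba_down_post(phba);
 *
 * so no further sli_rev checks are needed at those call sites.
 */
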
/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the SR-IOV extended capability of the attached PCI
 * function and returns the maximum number of virtual functions the physical
 * function supports, or 0 if the device does not support SR-IOV.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

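/*
 * Note: the routine above reads PCI_SRIOV_TOTAL_VF straight from the SR-IOV
 * extended capability; the PCI core helper pci_sriov_get_totalvfs() reports
 * essentially the same limit and could be used for the same purpose.
 */
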
/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	atomic_set(&phba->dbg_log_idx, 0);
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
			  lpfc_idle_stat_delay_work);

	return 0;
}

/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	/* Set up phase-1 common device driver resources */

	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	phba->lpfc_sg_dma_buf_pool =
		dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;

	phba->lpfc_cmd_rsp_buf_pool =
			dma_pool_create("lpfc_cmd_rsp_buf_pool",
					&phba->pcidev->dev,
					sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp),
					BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
}

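/*
 * Worked example for the non-BlockGuard path above (illustrative values
 * only): with cfg_sg_seg_cnt = 64, the BPL reserves 64 + 2 BDEs, so
 * cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
 * 66 * entry_sz and cfg_total_seg_cnt becomes 66.
 */
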
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;
	int rc, i, max_buf_size;
	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
	struct lpfc_mqe *mqe;
	int longs;
	int extra;
	uint64_t wwn;
	u32 if_type;
	u32 if_fam;

	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	/* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/* Allocate all driver workqueues here */

	/* The lpfc_wq workqueue for deferred irq use */
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);

	/*
	 * Initialize timers used by driver
	 */
	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);

	/* FCF rediscover timer */
	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each EQ/CQ/WQ tuple.
	 * The WQ create will allocate the ring.
	 */

	/* Initialize buffer queue management fields */
	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	/* Initialize the Abort buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Initialize the Abort nvme buffer list used by driver */
		spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
		spin_lock_init(&phba->sli4_hba.t_active_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
	}

	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
	spin_lock_init(&phba->sli4_hba.asynce_list_lock);
	spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize mboxq lists. If the early init routines fail
	 * these lists need to be correctly initialized.
	 */
	INIT_LIST_HEAD(&phba->sli.mboxq);
	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);

	/* initialize optic_state to 0xFF */
	phba->sli4_hba.lnk_info.optic_state = 0xff;

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc)) {
			rc = -ENODEV;
			goto out_free_mem;
		}
		phba->temp_sensor_support = 1;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;
	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Check for NVMET being configured */
	phba->nvmet_support = 0;
	if (lpfc_enable_nvmet_cnt) {

		/* First get WWN of HBA instance */
		lpfc_read_nv(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6016 Mailbox failed , mbxCmd x%x "
					"READ_NV, mbxStatus x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
		mb = &mboxq->u.mb;
		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(uint64_t));
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwnn.u.name = wwn;
		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
		       sizeof(uint64_t));
		/* wwn is WWPN of HBA instance */
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwpn.u.name = wwn;

		/* Check to see if it matches any module parameter */
		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
			if (wwn == lpfc_enable_nvmet[i]) {
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
				if (lpfc_nvmet_mem_alloc(phba))
					break;

				phba->nvmet_support = 1; /* a match */

				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6017 NVME Target %016llx\n",
						wwn);
#else
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6021 Can't enable NVME Target."
						" NVME_TARGET_FC infrastructure"
						" is not in kernel\n");
#endif
				/* Not supported for NVMET */
				phba->cfg_xri_rebalancing = 0;
				if (phba->irq_chann_mode == NHT_MODE) {
					phba->cfg_irq_chann =
						phba->sli4_hba.num_present_cpu;
					phba->cfg_hdw_queue =
						phba->sli4_hba.num_present_cpu;
					phba->irq_chann_mode = NORMAL_MODE;
				}
				break;
			}
		}
	}

	lpfc_nvme_mod_param_dep(phba);

	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
	lpfc_supported_pages(mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (!rc) {
		mqe = &mboxq->u.mqe;
		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
		       LPFC_MAX_SUPPORTED_PAGES);
		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
			switch (pn_page[i]) {
			case LPFC_SLI4_PARAMETERS:
				phba->sli4_hba.pc_sli4_params.supported = 1;
				break;
			default:
				break;
			}
		}
		/* Read the port's SLI4 Parameters capabilities if supported. */
		if (phba->sli4_hba.pc_sli4_params.supported)
			rc = lpfc_pc_sli4_params_get(phba, mboxq);
		if (rc) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if_fam = bf_get(lpfc_sli_intf_sli_family,
				&phba->sli4_hba.sli_intf);
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2999 Unsupported SLI4 Parameters "
					"Extents and RPI headers enabled.\n");
			if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
			    if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
				mempool_free(mboxq, phba->mbox_mem_pool);
				rc = -EIO;
				goto out_free_bsmbx;
			}
		}
		if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
		      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * 1 for cmd, 1 for rsp, NVME adds an extra one
	 * for boundary conditions in its max_sgl_segment template.
	 */
	extra = 2;
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		extra++;

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);

	/*
	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be calculated.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		/* Both cfg_enable_bg and cfg_external_dif code paths */

		/*
		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE. Since we have no control
		 * over how many protection segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt
		 * to minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		/*
		 * If supporting DIF, reduce the seg count for scsi to
		 * allow room for the DIF sges.
		 */
		if (phba->cfg_enable_bg &&
		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
		else
			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;

	} else {
		/*
		 * The scsi_buf for a regular I/O holds the FCP cmnd,
		 * the FCP rsp, a SGE for each, and a SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) +
				((phba->cfg_sg_seg_cnt + extra) *
				sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
		phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;

		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
		 * need to post 1 page for the SGL.
		 */
	}

	if (phba->cfg_xpsgl && !phba->nvmet_support)
		phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
	else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
				SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	phba->border_sge_num = phba->cfg_sg_dma_buf_size /
			       sizeof(struct sli4_sge);

	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6300 Reducing NVME sg segment "
					"cnt to %d\n",
					LPFC_MAX_NVME_SEG_CNT);
			phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
		} else
			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_seg_cnt:%d dmabuf_size:%d "
			"total:%d scsi:%d nvme:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
			phba->cfg_nvme_seg_cnt);

	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
		i = phba->cfg_sg_dma_buf_size;
	else
		i = SLI4_PAGE_SIZE;

	phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					i, 0);
	if (!phba->lpfc_sg_dma_buf_pool)
		goto out_free_bsmbx;

	phba->lpfc_cmd_rsp_buf_pool =
			dma_pool_create("lpfc_cmd_rsp_buf_pool",
					&phba->pcidev->dev,
					sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp),
					i, 0);
	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto out_free_sg_dma_buf;

	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);

	/* Verify RAS support on adapter */
	lpfc_sli4_ras_init(phba);

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
					    sizeof(struct lpfc_hba_eq_hdl),
					    GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
					 sizeof(struct lpfc_vector_map_info),
					 GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_hdl;
	}

	phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
	if (!phba->sli4_hba.eq_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3321 Failed allocation for per_cpu stats\n");
		rc = -ENOMEM;
		goto out_free_hba_cpu_map;
	}

	phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
					   sizeof(*phba->sli4_hba.idle_stat),
					   GFP_KERNEL);
	if (!phba->sli4_hba.idle_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3390 Failed allocation for idle_stat\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_info;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
	if (!phba->sli4_hba.c_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3332 Failed allocating per cpu hdwq stats\n");
		rc = -ENOMEM;
		goto out_free_hba_idle_stat;
	}
#endif

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
out_free_hba_idle_stat:
	kfree(phba->sli4_hba.idle_stat);
#endif
out_free_hba_eq_info:
	free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map:
	kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
	kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_cmd_rsp_buf:
	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;
out_free_sg_dma_buf:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}

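/*
 * Worked example for the non-DIF sizing above (illustrative values only):
 * with cfg_sg_seg_cnt = 64 and NVME enabled (extra = 3),
 * cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
 * 67 * sizeof(struct sli4_sge) and cfg_total_seg_cnt = 67; the result is
 * then raised to LPFC_MIN_SG_SLI4_BUF_SZ or rounded by SLI4_PAGE_ALIGN()
 * unless the XPSGL path applies.
 */
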
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	free_percpu(phba->sli4_hba.eq_info);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	free_percpu(phba->sli4_hba.c_stat);
#endif
	kfree(phba->sli4_hba.idle_stat);

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_possible_cpu = 0;
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;
	cpumask_clear(&phba->sli4_hba.irq_aff_mask);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	if (phba->worker_thread)
		kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbs
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host.  */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, iocb_count);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}

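/*
 * Note: lpfc_sli_next_iotag() both assigns the iotag and records the iocbq
 * in the driver's iotag lookup array, which is how completion handling maps
 * an iotag back to its iocbq; a return of zero means that array could not
 * be extended, so the partially built list is torn down above.
 */
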
/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}

/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}

/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}

/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}

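/*
 * Note: lpfc_sglq_active_list is sized by max_xri and indexed by the
 * adapter-relative XRI of an active sglq, letting completion and abort
 * handling find the sglq that owns a given XRI in constant time.
 */
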
/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}

/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.io_xri_cnt = 0;
}

/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents.  This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE modulo 64 rpi context headers.  This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 * 	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}

/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba.  This single region
 * provides support for up to 64 rpis.  The region is used globally
 * by the device.
 *
 * Returns:
 *   A valid rpi hdr on success.
 *   A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * tracked separately.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}

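/*
 * Sizing note for the region allocated above: LPFC_HDR_TEMPLATE_SIZE is a
 * 4KB, 4K-aligned block and each rpi context template occupies 64 bytes,
 * which is where the "up to 64 rpis" (LPFC_RPI_HDR_COUNT) per header block
 * described in the comment above comes from.
 */
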
/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}

/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *      pointer to @phba - successful
 *      NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}

/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		kfree(phba->sli4_hba.hdwq);

	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
	return;
}

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* Only 1 vport (pport) will support NVME target */
		phba->targetport = NULL;
		phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
				"6076 NVME Target Found\n");
	}

	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition,
	 * any initial discovery should be completed.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = phba->cfg_prot_mask;
		old_guard = phba->cfg_prot_guard;

		/* Only allow supported values */
		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
			SHOST_DIX_TYPE0_PROTECTION |
			SHOST_DIX_TYPE1_PROTECTION);
		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
					 SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
			if ((old_mask != phba->cfg_prot_mask) ||
				(old_guard != phba->cfg_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d  guard %d\n",
					phba->cfg_prot_mask,
					phba->cfg_prot_guard);

			scsi_host_set_prot(shost, phba->cfg_prot_mask);
			scsi_host_set_guard(shost, phba->cfg_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}
}

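/*
 * Example (illustrative configuration): a protection mask of
 * SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION with a guard of
 * SHOST_DIX_GUARD_CRC passes the filtering above unchanged and advertises
 * end-to-end T10 Type 1 protection with CRC guard tags to the SCSI layer.
 */
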
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}

/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = -ENODEV;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
					       &phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
			  offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
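/*
 * Illustrative, standalone sketch (not part of the driver): carving one
 * coherent DMA allocation into sub-regions with offsetof(), as done for the
 * SLI-2 SLIM region above.  The layout struct and sizes here are assumed
 * examples, not the real struct lpfc_sli2_slim; calloc() stands in for
 * dma_alloc_coherent().
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct ex_slim {
	char mbx[256];
	char mbx_ext_words[64];
	char pcb[128];
	char iocbs[1024];
};

int main(void)
{
	char *virt = calloc(1, sizeof(struct ex_slim));
	char *mbox, *pcb;

	if (!virt)
		return 1;
	/* each sub-structure is addressed at a fixed offset into the block */
	mbox = virt + offsetof(struct ex_slim, mbx);
	pcb  = virt + offsetof(struct ex_slim, pcb);
	printf("mbox at +%zu, pcb at +%zu\n",
	       (size_t)(mbox - virt), (size_t)(pcb - virt));
	free(virt);
	return 0;
}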
/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);
}
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			       &portsmphr_reg.word0) ||
		    (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				       &reg_data.word0) ||
			    (bf_get(lpfc_sliport_status_err, &reg_data) &&
			     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: sli if type to operate on.
 *
 * This routine is invoked to set up SLI4 BAR1 register memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}
/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
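/*
 * Illustrative, standalone sketch (not part of the driver): how the per-VF
 * doorbell offsets above are derived.  Each virtual function owns one
 * LPFC_VFR_PAGE_SIZE-sized page of BAR2, and each doorbell sits at a fixed
 * offset inside that page.  The page size and offset below are assumed
 * example values, not the real LPFC_* macros.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_VFR_PAGE_SIZE	0x1000u	/* assumed per-VF doorbell page size */
#define EX_RQ_DOORBELL		0x00a0u	/* assumed RQ doorbell page offset   */

/* Compute the BAR2-relative offset of a VF's RQ doorbell register. */
static uint32_t ex_vf_rq_doorbell(uint32_t vf)
{
	return vf * EX_VFR_PAGE_SIZE + EX_RQ_DOORBELL;
}

int main(void)
{
	/* VF 3 -> 3 * 0x1000 + 0xa0 = 0x30a0 */
	printf("vf 3 RQ doorbell offset: 0x%x\n", ex_vf_rq_doorbell(3));
	return 0;
}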
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
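/*
 * Illustrative, standalone sketch (not part of the driver): the alignment
 * and address split performed above.  A 16-byte-aligned physical address is
 * reported to the port as two 30-bit halves, each shifted left by 2 with a
 * marker bit in the low bits.  The shift amounts (>> 34 and >> 4) and the
 * 30-bit mask mirror the code above; the marker values are assumed examples,
 * not the real LPFC_BMBX_BIT1_ADDR_* definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_BIT1_ADDR_HI	0x2u	/* assumed "high half" marker */
#define EX_BIT1_ADDR_LO	0x0u	/* assumed "low half" marker  */

int main(void)
{
	uint64_t aphys = 0x12345678f0ULL & ~0xfULL;	/* 16-byte aligned */
	uint32_t hi30 = (uint32_t)((aphys >> 34) & 0x3fffffff);
	uint32_t lo30 = (uint32_t)((aphys >> 4) & 0x3fffffff);
	uint32_t addr_hi = (hi30 << 2) | EX_BIT1_ADDR_HI;
	uint32_t addr_lo = (lo30 << 2) | EX_BIT1_ADDR_LO;

	printf("addr_hi=0x%08x addr_lo=0x%08x\n", addr_hi, addr_lo);
	return 0;
}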
/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
static const char * const lpfc_topo_to_str[] = {
	"Loop then P2P",
	"Loopback",
	"P2P Only",
	"Unsupported",
	"Loop Only",
	"Unsupported",
	"P2P then Loop",
};
#define	LINK_FLAGS_DEF	0x0
#define	LINK_FLAGS_P2P	0x1
#define	LINK_FLAGS_LOOP	0x2
/**
 * lpfc_map_topology - Map the topology read from READ_CONFIG
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to read config data
 *
 * This routine is invoked to map the topology values as read
 * from the read config mailbox command. If the persistent
 * topology feature is supported, the firmware will provide the
 * saved topology information to be used in INIT_LINK
 **/
static void
lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
{
	u8 ptv, tf, pt;

	ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
	tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
	pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
			ptv, tf, pt);
	if (!ptv) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2019 FW does not support persistent topology "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
		return;
	}
	/* FW supports persistent topology - override module parameter value */
	phba->hba_flag |= HBA_PERSISTENT_TOPO;
	switch (phba->pcidev->device) {
	case PCI_DEVICE_ID_LANCER_G7_FC:
	case PCI_DEVICE_ID_LANCER_G6_FC:
		if (!tf) {
			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
					? FLAGS_TOPOLOGY_MODE_LOOP
					: FLAGS_TOPOLOGY_MODE_PT_PT);
		} else {
			phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
		}
		break;
	default:
		if (tf) {
			/* If topology failover set - pt is '0' or '1' */
			phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
					      FLAGS_TOPOLOGY_MODE_LOOP_PT);
		} else {
			phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
					? FLAGS_TOPOLOGY_MODE_PT_PT
					: FLAGS_TOPOLOGY_MODE_LOOP);
		}
		break;
	}
	if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2020 Using persistent topology value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2021 Invalid topology values from FW "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	}
}
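/*
 * Illustrative, standalone sketch (not part of the driver): the tf/pt
 * decision made in lpfc_map_topology() above for pre-G6 adapters.  With
 * topology failover (tf) set, pt selects which topology is tried first;
 * without it, pt selects point-to-point or loop outright.  The enum names
 * are assumed stand-ins for the driver's FLAGS_TOPOLOGY_MODE_* values.
 */
#include <stdio.h>

enum ex_topo { EX_LOOP, EX_PT_PT, EX_LOOP_PT, EX_PT_LOOP };

static enum ex_topo ex_map_topology(int tf, int pt)
{
	if (tf)	/* failover: pt picks the first topology to try */
		return pt ? EX_PT_LOOP : EX_LOOP_PT;
	return pt == 1 ? EX_PT_PT : EX_LOOP;	/* 1 == P2P flag */
}

int main(void)
{
	printf("tf=1 pt=0 -> %d (loop then P2P)\n", ex_map_topology(1, 0));
	printf("tf=0 pt=1 -> %d (P2P only)\n", ex_map_topology(0, 1));
	return 0;
}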
8462 * lpfc_sli4_read_config - Get the config parameters.
8463 * @phba: pointer to lpfc hba data structure.
8465 * This routine is invoked to read the configuration parameters from the HBA.
8466 * The configuration parameters are used to set the base and maximum values
8467 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
8468 * allocation for the port.
8472 * -ENOMEM - No available memory
8473 * -EIO - The mailbox failed to complete successfully.
8476 lpfc_sli4_read_config(struct lpfc_hba
*phba
)
8479 struct lpfc_mbx_read_config
*rd_config
;
8480 union lpfc_sli4_cfg_shdr
*shdr
;
8481 uint32_t shdr_status
, shdr_add_status
;
8482 struct lpfc_mbx_get_func_cfg
*get_func_cfg
;
8483 struct lpfc_rsrc_desc_fcfcoe
*desc
;
8485 uint16_t forced_link_speed
;
8486 uint32_t if_type
, qmin
;
8487 int length
, i
, rc
= 0, rc2
;
8489 pmb
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
8491 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8492 "2011 Unable to allocate memory for issuing "
8493 "SLI_CONFIG_SPECIAL mailbox command\n");
8497 lpfc_read_config(phba
, pmb
);
8499 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
8500 if (rc
!= MBX_SUCCESS
) {
8501 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8502 "2012 Mailbox failed , mbxCmd x%x "
8503 "READ_CONFIG, mbxStatus x%x\n",
8504 bf_get(lpfc_mqe_command
, &pmb
->u
.mqe
),
8505 bf_get(lpfc_mqe_status
, &pmb
->u
.mqe
));
8508 rd_config
= &pmb
->u
.mqe
.un
.rd_config
;
8509 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv
, rd_config
)) {
8510 phba
->sli4_hba
.lnk_info
.lnk_dv
= LPFC_LNK_DAT_VAL
;
8511 phba
->sli4_hba
.lnk_info
.lnk_tp
=
8512 bf_get(lpfc_mbx_rd_conf_lnk_type
, rd_config
);
8513 phba
->sli4_hba
.lnk_info
.lnk_no
=
8514 bf_get(lpfc_mbx_rd_conf_lnk_numb
, rd_config
);
8515 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
8516 "3081 lnk_type:%d, lnk_numb:%d\n",
8517 phba
->sli4_hba
.lnk_info
.lnk_tp
,
8518 phba
->sli4_hba
.lnk_info
.lnk_no
);
8520 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
8521 "3082 Mailbox (x%x) returned ldv:x0\n",
8522 bf_get(lpfc_mqe_command
, &pmb
->u
.mqe
));
8523 if (bf_get(lpfc_mbx_rd_conf_bbscn_def
, rd_config
)) {
8524 phba
->bbcredit_support
= 1;
8525 phba
->sli4_hba
.bbscn_params
.word0
= rd_config
->word8
;
8528 phba
->sli4_hba
.conf_trunk
=
8529 bf_get(lpfc_mbx_rd_conf_trunk
, rd_config
);
8530 phba
->sli4_hba
.extents_in_use
=
8531 bf_get(lpfc_mbx_rd_conf_extnts_inuse
, rd_config
);
8532 phba
->sli4_hba
.max_cfg_param
.max_xri
=
8533 bf_get(lpfc_mbx_rd_conf_xri_count
, rd_config
);
8534 /* Reduce resource usage in kdump environment */
8535 if (is_kdump_kernel() &&
8536 phba
->sli4_hba
.max_cfg_param
.max_xri
> 512)
8537 phba
->sli4_hba
.max_cfg_param
.max_xri
= 512;
8538 phba
->sli4_hba
.max_cfg_param
.xri_base
=
8539 bf_get(lpfc_mbx_rd_conf_xri_base
, rd_config
);
8540 phba
->sli4_hba
.max_cfg_param
.max_vpi
=
8541 bf_get(lpfc_mbx_rd_conf_vpi_count
, rd_config
);
8542 /* Limit the max we support */
8543 if (phba
->sli4_hba
.max_cfg_param
.max_vpi
> LPFC_MAX_VPORTS
)
8544 phba
->sli4_hba
.max_cfg_param
.max_vpi
= LPFC_MAX_VPORTS
;
8545 phba
->sli4_hba
.max_cfg_param
.vpi_base
=
8546 bf_get(lpfc_mbx_rd_conf_vpi_base
, rd_config
);
8547 phba
->sli4_hba
.max_cfg_param
.max_rpi
=
8548 bf_get(lpfc_mbx_rd_conf_rpi_count
, rd_config
);
8549 phba
->sli4_hba
.max_cfg_param
.rpi_base
=
8550 bf_get(lpfc_mbx_rd_conf_rpi_base
, rd_config
);
8551 phba
->sli4_hba
.max_cfg_param
.max_vfi
=
8552 bf_get(lpfc_mbx_rd_conf_vfi_count
, rd_config
);
8553 phba
->sli4_hba
.max_cfg_param
.vfi_base
=
8554 bf_get(lpfc_mbx_rd_conf_vfi_base
, rd_config
);
8555 phba
->sli4_hba
.max_cfg_param
.max_fcfi
=
8556 bf_get(lpfc_mbx_rd_conf_fcfi_count
, rd_config
);
8557 phba
->sli4_hba
.max_cfg_param
.max_eq
=
8558 bf_get(lpfc_mbx_rd_conf_eq_count
, rd_config
);
8559 phba
->sli4_hba
.max_cfg_param
.max_rq
=
8560 bf_get(lpfc_mbx_rd_conf_rq_count
, rd_config
);
8561 phba
->sli4_hba
.max_cfg_param
.max_wq
=
8562 bf_get(lpfc_mbx_rd_conf_wq_count
, rd_config
);
8563 phba
->sli4_hba
.max_cfg_param
.max_cq
=
8564 bf_get(lpfc_mbx_rd_conf_cq_count
, rd_config
);
8565 phba
->lmt
= bf_get(lpfc_mbx_rd_conf_lmt
, rd_config
);
8566 phba
->sli4_hba
.next_xri
= phba
->sli4_hba
.max_cfg_param
.xri_base
;
8567 phba
->vpi_base
= phba
->sli4_hba
.max_cfg_param
.vpi_base
;
8568 phba
->vfi_base
= phba
->sli4_hba
.max_cfg_param
.vfi_base
;
8569 phba
->max_vpi
= (phba
->sli4_hba
.max_cfg_param
.max_vpi
> 0) ?
8570 (phba
->sli4_hba
.max_cfg_param
.max_vpi
- 1) : 0;
8571 phba
->max_vports
= phba
->max_vpi
;
8572 lpfc_map_topology(phba
, rd_config
);
8573 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
8574 "2003 cfg params Extents? %d "
8579 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
8580 phba
->sli4_hba
.extents_in_use
,
8581 phba
->sli4_hba
.max_cfg_param
.xri_base
,
8582 phba
->sli4_hba
.max_cfg_param
.max_xri
,
8583 phba
->sli4_hba
.max_cfg_param
.vpi_base
,
8584 phba
->sli4_hba
.max_cfg_param
.max_vpi
,
8585 phba
->sli4_hba
.max_cfg_param
.vfi_base
,
8586 phba
->sli4_hba
.max_cfg_param
.max_vfi
,
8587 phba
->sli4_hba
.max_cfg_param
.rpi_base
,
8588 phba
->sli4_hba
.max_cfg_param
.max_rpi
,
8589 phba
->sli4_hba
.max_cfg_param
.max_fcfi
,
8590 phba
->sli4_hba
.max_cfg_param
.max_eq
,
8591 phba
->sli4_hba
.max_cfg_param
.max_cq
,
8592 phba
->sli4_hba
.max_cfg_param
.max_wq
,
8593 phba
->sli4_hba
.max_cfg_param
.max_rq
,
8597 * Calculate queue resources based on how
8598 * many WQ/CQ/EQs are available.
8600 qmin
= phba
->sli4_hba
.max_cfg_param
.max_wq
;
8601 if (phba
->sli4_hba
.max_cfg_param
.max_cq
< qmin
)
8602 qmin
= phba
->sli4_hba
.max_cfg_param
.max_cq
;
8603 if (phba
->sli4_hba
.max_cfg_param
.max_eq
< qmin
)
8604 qmin
= phba
->sli4_hba
.max_cfg_param
.max_eq
;
8606 * Whats left after this can go toward NVME / FCP.
8607 * The minus 4 accounts for ELS, NVME LS, MBOX
8608 * plus one extra. When configured for
8609 * NVMET, FCP io channel WQs are not created.
8613 /* Check to see if there is enough for NVME */
8614 if ((phba
->cfg_irq_chann
> qmin
) ||
8615 (phba
->cfg_hdw_queue
> qmin
)) {
8616 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8617 "2005 Reducing Queues - "
8618 "FW resource limitation: "
8619 "WQ %d CQ %d EQ %d: min %d: "
8621 phba
->sli4_hba
.max_cfg_param
.max_wq
,
8622 phba
->sli4_hba
.max_cfg_param
.max_cq
,
8623 phba
->sli4_hba
.max_cfg_param
.max_eq
,
8624 qmin
, phba
->cfg_irq_chann
,
8625 phba
->cfg_hdw_queue
);
8627 if (phba
->cfg_irq_chann
> qmin
)
8628 phba
->cfg_irq_chann
= qmin
;
8629 if (phba
->cfg_hdw_queue
> qmin
)
8630 phba
->cfg_hdw_queue
= qmin
;
8637 /* Update link speed if forced link speed is supported */
8638 if_type
= bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
);
8639 if (if_type
>= LPFC_SLI_INTF_IF_TYPE_2
) {
8641 bf_get(lpfc_mbx_rd_conf_link_speed
, rd_config
);
8642 if (forced_link_speed
) {
8643 phba
->hba_flag
|= HBA_FORCED_LINK_SPEED
;
8645 switch (forced_link_speed
) {
8647 phba
->cfg_link_speed
=
8648 LPFC_USER_LINK_SPEED_1G
;
8651 phba
->cfg_link_speed
=
8652 LPFC_USER_LINK_SPEED_2G
;
8655 phba
->cfg_link_speed
=
8656 LPFC_USER_LINK_SPEED_4G
;
8659 phba
->cfg_link_speed
=
8660 LPFC_USER_LINK_SPEED_8G
;
8662 case LINK_SPEED_10G
:
8663 phba
->cfg_link_speed
=
8664 LPFC_USER_LINK_SPEED_10G
;
8666 case LINK_SPEED_16G
:
8667 phba
->cfg_link_speed
=
8668 LPFC_USER_LINK_SPEED_16G
;
8670 case LINK_SPEED_32G
:
8671 phba
->cfg_link_speed
=
8672 LPFC_USER_LINK_SPEED_32G
;
8674 case LINK_SPEED_64G
:
8675 phba
->cfg_link_speed
=
8676 LPFC_USER_LINK_SPEED_64G
;
8679 phba
->cfg_link_speed
=
8680 LPFC_USER_LINK_SPEED_AUTO
;
8683 lpfc_printf_log(phba
, KERN_ERR
,
8685 "0047 Unrecognized link "
8688 phba
->cfg_link_speed
=
8689 LPFC_USER_LINK_SPEED_AUTO
;
8694 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
8695 length
= phba
->sli4_hba
.max_cfg_param
.max_xri
-
8696 lpfc_sli4_get_els_iocb_cnt(phba
);
8697 if (phba
->cfg_hba_queue_depth
> length
) {
8698 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
8699 "3361 HBA queue depth changed from %d to %d\n",
8700 phba
->cfg_hba_queue_depth
, length
);
8701 phba
->cfg_hba_queue_depth
= length
;
8704 if (bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
) <
8705 LPFC_SLI_INTF_IF_TYPE_2
)
8708 /* get the pf# and vf# for SLI4 if_type 2 port */
8709 length
= (sizeof(struct lpfc_mbx_get_func_cfg
) -
8710 sizeof(struct lpfc_sli4_cfg_mhdr
));
8711 lpfc_sli4_config(phba
, pmb
, LPFC_MBOX_SUBSYSTEM_COMMON
,
8712 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG
,
8713 length
, LPFC_SLI4_MBX_EMBED
);
8715 rc2
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
8716 shdr
= (union lpfc_sli4_cfg_shdr
*)
8717 &pmb
->u
.mqe
.un
.sli4_config
.header
.cfg_shdr
;
8718 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
8719 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
8720 if (rc2
|| shdr_status
|| shdr_add_status
) {
8721 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8722 "3026 Mailbox failed , mbxCmd x%x "
8723 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8724 bf_get(lpfc_mqe_command
, &pmb
->u
.mqe
),
8725 bf_get(lpfc_mqe_status
, &pmb
->u
.mqe
));
8729 /* search for fc_fcoe resrouce descriptor */
8730 get_func_cfg
= &pmb
->u
.mqe
.un
.get_func_cfg
;
8732 pdesc_0
= (char *)&get_func_cfg
->func_cfg
.desc
[0];
8733 desc
= (struct lpfc_rsrc_desc_fcfcoe
*)pdesc_0
;
8734 length
= bf_get(lpfc_rsrc_desc_fcfcoe_length
, desc
);
8735 if (length
== LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD
)
8736 length
= LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH
;
8737 else if (length
!= LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH
)
8740 for (i
= 0; i
< LPFC_RSRC_DESC_MAX_NUM
; i
++) {
8741 desc
= (struct lpfc_rsrc_desc_fcfcoe
*)(pdesc_0
+ length
* i
);
8742 if (LPFC_RSRC_DESC_TYPE_FCFCOE
==
8743 bf_get(lpfc_rsrc_desc_fcfcoe_type
, desc
)) {
8744 phba
->sli4_hba
.iov
.pf_number
=
8745 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum
, desc
);
8746 phba
->sli4_hba
.iov
.vf_number
=
8747 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum
, desc
);
8752 if (i
< LPFC_RSRC_DESC_MAX_NUM
)
8753 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
8754 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8755 "vf_number:%d\n", phba
->sli4_hba
.iov
.pf_number
,
8756 phba
->sli4_hba
.iov
.vf_number
);
8758 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8759 "3028 GET_FUNCTION_CONFIG: failed to find "
8760 "Resource Descriptor:x%x\n",
8761 LPFC_RSRC_DESC_TYPE_FCFCOE
);
8764 mempool_free(pmb
, phba
->mbox_mem_pool
);
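/*
 * Illustrative, standalone sketch (not part of the driver): the queue-minimum
 * clamp applied in lpfc_sli4_read_config() above.  The usable fast-path queue
 * count is the smallest of the WQ/CQ/EQ limits reported by READ_CONFIG minus
 * four slow-path queues (ELS, NVME LS, MBOX, plus one extra), and both the
 * IRQ channel count and hardware queue count are clamped to it.  The numbers
 * below are assumed example values.
 */
#include <stdio.h>

static int ex_min3(int a, int b, int c)
{
	int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	int max_wq = 64, max_cq = 96, max_eq = 32;	/* assumed FW limits */
	int irq_chann = 40, hdw_queue = 40;		/* assumed config    */
	int qmin = ex_min3(max_wq, max_cq, max_eq) - 4;

	if (irq_chann > qmin)
		irq_chann = qmin;
	if (hdw_queue > qmin)
		hdw_queue = qmin;
	printf("qmin=%d irq_chann=%d hdw_queue=%d\n",
	       qmin, irq_chann, hdw_queue);
	return 0;
}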
/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0.  This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}
/**
 * lpfc_sli4_queue_verify - Verify and update EQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs.
 * After this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	if (phba->nvmet_support) {
		if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
			phba->cfg_hdw_queue, phba->cfg_irq_chann,
			phba->cfg_nvmet_mrq);

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
	return 0;
}
8871 lpfc_alloc_io_wq_cq(struct lpfc_hba
*phba
, int idx
)
8873 struct lpfc_queue
*qdesc
;
8877 cpu
= lpfc_find_cpu_handle(phba
, idx
, LPFC_FIND_BY_HDWQ
);
8878 /* Create Fast Path IO CQs */
8879 if (phba
->enab_exp_wqcq_pages
)
8880 /* Increase the CQ size when WQEs contain an embedded cdb */
8881 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_EXPANDED_PAGE_SIZE
,
8882 phba
->sli4_hba
.cq_esize
,
8883 LPFC_CQE_EXP_COUNT
, cpu
);
8886 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
8887 phba
->sli4_hba
.cq_esize
,
8888 phba
->sli4_hba
.cq_ecount
, cpu
);
8890 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8891 "0499 Failed allocate fast-path IO CQ (%d)\n",
8895 qdesc
->qe_valid
= 1;
8898 phba
->sli4_hba
.hdwq
[idx
].io_cq
= qdesc
;
8900 /* Create Fast Path IO WQs */
8901 if (phba
->enab_exp_wqcq_pages
) {
8902 /* Increase the WQ size when WQEs contain an embedded cdb */
8903 wqesize
= (phba
->fcp_embed_io
) ?
8904 LPFC_WQE128_SIZE
: phba
->sli4_hba
.wq_esize
;
8905 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_EXPANDED_PAGE_SIZE
,
8907 LPFC_WQE_EXP_COUNT
, cpu
);
8909 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
8910 phba
->sli4_hba
.wq_esize
,
8911 phba
->sli4_hba
.wq_ecount
, cpu
);
8914 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8915 "0503 Failed allocate fast-path IO WQ (%d)\n",
8921 phba
->sli4_hba
.hdwq
[idx
].io_wq
= qdesc
;
8922 list_add_tail(&qdesc
->wq_list
, &phba
->sli4_hba
.lpfc_wq_list
);
8927 * lpfc_sli4_queue_create - Create all the SLI4 queues
8928 * @phba: pointer to lpfc hba data structure.
8930 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
8931 * operation. For each SLI4 queue type, the parameters such as queue entry
8932 * count (queue depth) shall be taken from the module parameter. For now,
8933 * we just use some constant number as place holder.
8937 * -ENOMEM - No availble memory
8938 * -EIO - The mailbox failed to complete successfully.
8941 lpfc_sli4_queue_create(struct lpfc_hba
*phba
)
8943 struct lpfc_queue
*qdesc
;
8944 int idx
, cpu
, eqcpu
;
8945 struct lpfc_sli4_hdw_queue
*qp
;
8946 struct lpfc_vector_map_info
*cpup
;
8947 struct lpfc_vector_map_info
*eqcpup
;
8948 struct lpfc_eq_intr_info
*eqi
;
8951 * Create HBA Record arrays.
8952 * Both NVME and FCP will share that same vectors / EQs
8954 phba
->sli4_hba
.mq_esize
= LPFC_MQE_SIZE
;
8955 phba
->sli4_hba
.mq_ecount
= LPFC_MQE_DEF_COUNT
;
8956 phba
->sli4_hba
.wq_esize
= LPFC_WQE_SIZE
;
8957 phba
->sli4_hba
.wq_ecount
= LPFC_WQE_DEF_COUNT
;
8958 phba
->sli4_hba
.rq_esize
= LPFC_RQE_SIZE
;
8959 phba
->sli4_hba
.rq_ecount
= LPFC_RQE_DEF_COUNT
;
8960 phba
->sli4_hba
.eq_esize
= LPFC_EQE_SIZE_4B
;
8961 phba
->sli4_hba
.eq_ecount
= LPFC_EQE_DEF_COUNT
;
8962 phba
->sli4_hba
.cq_esize
= LPFC_CQE_SIZE
;
8963 phba
->sli4_hba
.cq_ecount
= LPFC_CQE_DEF_COUNT
;
8965 if (!phba
->sli4_hba
.hdwq
) {
8966 phba
->sli4_hba
.hdwq
= kcalloc(
8967 phba
->cfg_hdw_queue
, sizeof(struct lpfc_sli4_hdw_queue
),
8969 if (!phba
->sli4_hba
.hdwq
) {
8970 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8971 "6427 Failed allocate memory for "
8972 "fast-path Hardware Queue array\n");
8975 /* Prepare hardware queues to take IO buffers */
8976 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
8977 qp
= &phba
->sli4_hba
.hdwq
[idx
];
8978 spin_lock_init(&qp
->io_buf_list_get_lock
);
8979 spin_lock_init(&qp
->io_buf_list_put_lock
);
8980 INIT_LIST_HEAD(&qp
->lpfc_io_buf_list_get
);
8981 INIT_LIST_HEAD(&qp
->lpfc_io_buf_list_put
);
8982 qp
->get_io_bufs
= 0;
8983 qp
->put_io_bufs
= 0;
8984 qp
->total_io_bufs
= 0;
8985 spin_lock_init(&qp
->abts_io_buf_list_lock
);
8986 INIT_LIST_HEAD(&qp
->lpfc_abts_io_buf_list
);
8987 qp
->abts_scsi_io_bufs
= 0;
8988 qp
->abts_nvme_io_bufs
= 0;
8989 INIT_LIST_HEAD(&qp
->sgl_list
);
8990 INIT_LIST_HEAD(&qp
->cmd_rsp_buf_list
);
8991 spin_lock_init(&qp
->hdwq_lock
);
8995 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
8996 if (phba
->nvmet_support
) {
8997 phba
->sli4_hba
.nvmet_cqset
= kcalloc(
8998 phba
->cfg_nvmet_mrq
,
8999 sizeof(struct lpfc_queue
*),
9001 if (!phba
->sli4_hba
.nvmet_cqset
) {
9002 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9003 "3121 Fail allocate memory for "
9004 "fast-path CQ set array\n");
9007 phba
->sli4_hba
.nvmet_mrq_hdr
= kcalloc(
9008 phba
->cfg_nvmet_mrq
,
9009 sizeof(struct lpfc_queue
*),
9011 if (!phba
->sli4_hba
.nvmet_mrq_hdr
) {
9012 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9013 "3122 Fail allocate memory for "
9014 "fast-path RQ set hdr array\n");
9017 phba
->sli4_hba
.nvmet_mrq_data
= kcalloc(
9018 phba
->cfg_nvmet_mrq
,
9019 sizeof(struct lpfc_queue
*),
9021 if (!phba
->sli4_hba
.nvmet_mrq_data
) {
9022 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9023 "3124 Fail allocate memory for "
9024 "fast-path RQ set data array\n");
9030 INIT_LIST_HEAD(&phba
->sli4_hba
.lpfc_wq_list
);
9032 /* Create HBA Event Queues (EQs) */
9033 for_each_present_cpu(cpu
) {
9034 /* We only want to create 1 EQ per vector, even though
9035 * multiple CPUs might be using that vector. so only
9036 * selects the CPUs that are LPFC_CPU_FIRST_IRQ.
9038 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
9039 if (!(cpup
->flag
& LPFC_CPU_FIRST_IRQ
))
9042 /* Get a ptr to the Hardware Queue associated with this CPU */
9043 qp
= &phba
->sli4_hba
.hdwq
[cpup
->hdwq
];
9045 /* Allocate an EQ */
9046 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
9047 phba
->sli4_hba
.eq_esize
,
9048 phba
->sli4_hba
.eq_ecount
, cpu
);
9050 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9051 "0497 Failed allocate EQ (%d)\n",
9055 qdesc
->qe_valid
= 1;
9056 qdesc
->hdwq
= cpup
->hdwq
;
9057 qdesc
->chann
= cpu
; /* First CPU this EQ is affinitized to */
9058 qdesc
->last_cpu
= qdesc
->chann
;
9060 /* Save the allocated EQ in the Hardware Queue */
9063 eqi
= per_cpu_ptr(phba
->sli4_hba
.eq_info
, qdesc
->last_cpu
);
9064 list_add(&qdesc
->cpu_list
, &eqi
->list
);
9067 /* Now we need to populate the other Hardware Queues, that share
9068 * an IRQ vector, with the associated EQ ptr.
9070 for_each_present_cpu(cpu
) {
9071 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
9073 /* Check for EQ already allocated in previous loop */
9074 if (cpup
->flag
& LPFC_CPU_FIRST_IRQ
)
9077 /* Check for multiple CPUs per hdwq */
9078 qp
= &phba
->sli4_hba
.hdwq
[cpup
->hdwq
];
9082 /* We need to share an EQ for this hdwq */
9083 eqcpu
= lpfc_find_cpu_handle(phba
, cpup
->eq
, LPFC_FIND_BY_EQ
);
9084 eqcpup
= &phba
->sli4_hba
.cpu_map
[eqcpu
];
9085 qp
->hba_eq
= phba
->sli4_hba
.hdwq
[eqcpup
->hdwq
].hba_eq
;
9088 /* Allocate IO Path SLI4 CQ/WQs */
9089 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
9090 if (lpfc_alloc_io_wq_cq(phba
, idx
))
9094 if (phba
->nvmet_support
) {
9095 for (idx
= 0; idx
< phba
->cfg_nvmet_mrq
; idx
++) {
9096 cpu
= lpfc_find_cpu_handle(phba
, idx
,
9098 qdesc
= lpfc_sli4_queue_alloc(phba
,
9099 LPFC_DEFAULT_PAGE_SIZE
,
9100 phba
->sli4_hba
.cq_esize
,
9101 phba
->sli4_hba
.cq_ecount
,
9104 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9105 "3142 Failed allocate NVME "
9106 "CQ Set (%d)\n", idx
);
9109 qdesc
->qe_valid
= 1;
9112 phba
->sli4_hba
.nvmet_cqset
[idx
] = qdesc
;
9117 * Create Slow Path Completion Queues (CQs)
9120 cpu
= lpfc_find_cpu_handle(phba
, 0, LPFC_FIND_BY_EQ
);
9121 /* Create slow-path Mailbox Command Complete Queue */
9122 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
9123 phba
->sli4_hba
.cq_esize
,
9124 phba
->sli4_hba
.cq_ecount
, cpu
);
9126 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9127 "0500 Failed allocate slow-path mailbox CQ\n");
9130 qdesc
->qe_valid
= 1;
9131 phba
->sli4_hba
.mbx_cq
= qdesc
;
9133 /* Create slow-path ELS Complete Queue */
9134 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
9135 phba
->sli4_hba
.cq_esize
,
9136 phba
->sli4_hba
.cq_ecount
, cpu
);
9138 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9139 "0501 Failed allocate slow-path ELS CQ\n");
9142 qdesc
->qe_valid
= 1;
9144 phba
->sli4_hba
.els_cq
= qdesc
;
9148 * Create Slow Path Work Queues (WQs)
9151 /* Create Mailbox Command Queue */
9153 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
9154 phba
->sli4_hba
.mq_esize
,
9155 phba
->sli4_hba
.mq_ecount
, cpu
);
9157 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9158 "0505 Failed allocate slow-path MQ\n");
9162 phba
->sli4_hba
.mbx_wq
= qdesc
;
9165 * Create ELS Work Queues
9168 /* Create slow-path ELS Work Queue */
9169 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
9170 phba
->sli4_hba
.wq_esize
,
9171 phba
->sli4_hba
.wq_ecount
, cpu
);
9173 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9174 "0504 Failed allocate slow-path ELS WQ\n");
9178 phba
->sli4_hba
.els_wq
= qdesc
;
9179 list_add_tail(&qdesc
->wq_list
, &phba
->sli4_hba
.lpfc_wq_list
);
9181 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
9182 /* Create NVME LS Complete Queue */
9183 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
9184 phba
->sli4_hba
.cq_esize
,
9185 phba
->sli4_hba
.cq_ecount
, cpu
);
9187 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9188 "6079 Failed allocate NVME LS CQ\n");
9192 qdesc
->qe_valid
= 1;
9193 phba
->sli4_hba
.nvmels_cq
= qdesc
;
9195 /* Create NVME LS Work Queue */
9196 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
9197 phba
->sli4_hba
.wq_esize
,
9198 phba
->sli4_hba
.wq_ecount
, cpu
);
9200 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9201 "6080 Failed allocate NVME LS WQ\n");
9205 phba
->sli4_hba
.nvmels_wq
= qdesc
;
9206 list_add_tail(&qdesc
->wq_list
, &phba
->sli4_hba
.lpfc_wq_list
);
9210 * Create Receive Queue (RQ)
9213 /* Create Receive Queue for header */
9214 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
9215 phba
->sli4_hba
.rq_esize
,
9216 phba
->sli4_hba
.rq_ecount
, cpu
);
9218 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9219 "0506 Failed allocate receive HRQ\n");
9222 phba
->sli4_hba
.hdr_rq
= qdesc
;
9224 /* Create Receive Queue for data */
9225 qdesc
= lpfc_sli4_queue_alloc(phba
, LPFC_DEFAULT_PAGE_SIZE
,
9226 phba
->sli4_hba
.rq_esize
,
9227 phba
->sli4_hba
.rq_ecount
, cpu
);
9229 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9230 "0507 Failed allocate receive DRQ\n");
9233 phba
->sli4_hba
.dat_rq
= qdesc
;
9235 if ((phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) &&
9236 phba
->nvmet_support
) {
9237 for (idx
= 0; idx
< phba
->cfg_nvmet_mrq
; idx
++) {
9238 cpu
= lpfc_find_cpu_handle(phba
, idx
,
9240 /* Create NVMET Receive Queue for header */
9241 qdesc
= lpfc_sli4_queue_alloc(phba
,
9242 LPFC_DEFAULT_PAGE_SIZE
,
9243 phba
->sli4_hba
.rq_esize
,
9244 LPFC_NVMET_RQE_DEF_COUNT
,
9247 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9248 "3146 Failed allocate "
9253 phba
->sli4_hba
.nvmet_mrq_hdr
[idx
] = qdesc
;
9255 /* Only needed for header of RQ pair */
9256 qdesc
->rqbp
= kzalloc_node(sizeof(*qdesc
->rqbp
),
9259 if (qdesc
->rqbp
== NULL
) {
9260 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9261 "6131 Failed allocate "
9266 /* Put list in known state in case driver load fails. */
9267 INIT_LIST_HEAD(&qdesc
->rqbp
->rqb_buffer_list
);
9269 /* Create NVMET Receive Queue for data */
9270 qdesc
= lpfc_sli4_queue_alloc(phba
,
9271 LPFC_DEFAULT_PAGE_SIZE
,
9272 phba
->sli4_hba
.rq_esize
,
9273 LPFC_NVMET_RQE_DEF_COUNT
,
9276 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9277 "3156 Failed allocate "
9282 phba
->sli4_hba
.nvmet_mrq_data
[idx
] = qdesc
;
9286 /* Clear NVME stats */
9287 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
9288 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
9289 memset(&phba
->sli4_hba
.hdwq
[idx
].nvme_cstat
, 0,
9290 sizeof(phba
->sli4_hba
.hdwq
[idx
].nvme_cstat
));
9294 /* Clear SCSI stats */
9295 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_FCP
) {
9296 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
9297 memset(&phba
->sli4_hba
.hdwq
[idx
].scsi_cstat
, 0,
9298 sizeof(phba
->sli4_hba
.hdwq
[idx
].scsi_cstat
));
9305 lpfc_sli4_queue_destroy(phba
);
static inline void
__lpfc_sli4_release_queue(struct lpfc_queue **qp)
{
	if (*qp != NULL) {
		lpfc_sli4_queue_free(*qp);
		*qp = NULL;
	}
}

static inline void
lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
{
	int idx;

	if (*qs == NULL)
		return;

	for (idx = 0; idx < max; idx++)
		__lpfc_sli4_release_queue(&(*qs)[idx]);

	kfree(*qs);
	*qs = NULL;
}

static inline void
lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *eq;
	uint32_t idx;

	hdwq = phba->sli4_hba.hdwq;

	/* Loop thru all Hardware Queues */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		/* Free the CQ/WQ corresponding to the Hardware Queue */
		lpfc_sli4_queue_free(hdwq[idx].io_cq);
		lpfc_sli4_queue_free(hdwq[idx].io_wq);
		hdwq[idx].hba_eq = NULL;
		hdwq[idx].io_cq = NULL;
		hdwq[idx].io_wq = NULL;
		if (phba->cfg_xpsgl && !phba->nvmet_support)
			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
	}
	/* Loop thru all IRQ vectors */
	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		/* Free the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		lpfc_sli4_queue_free(eq);
		phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
	}
}
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	/*
	 * Set FREE_INIT before beginning to free the queues.
	 * Wait until the users of queues to acknowledge to
	 * release queues by clearing FREE_WAIT.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli4_cleanup_poll_list(phba);

	/* Release HBA eqs */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on this list has been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Done with freeing the queues */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_dmabuf *h_buf;
	struct rqb_dmabuf *rqb_buffer;

	rqbp = rq->rqbp;
	while (!list_empty(&rqbp->rqb_buffer_list)) {
		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
				 struct lpfc_dmabuf, list);

		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
		rqbp->buffer_count--;
	}
}
9459 lpfc_create_wq_cq(struct lpfc_hba
*phba
, struct lpfc_queue
*eq
,
9460 struct lpfc_queue
*cq
, struct lpfc_queue
*wq
, uint16_t *cq_map
,
9461 int qidx
, uint32_t qtype
)
9463 struct lpfc_sli_ring
*pring
;
9466 if (!eq
|| !cq
|| !wq
) {
9467 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9468 "6085 Fast-path %s (%d) not allocated\n",
9469 ((eq
) ? ((cq
) ? "WQ" : "CQ") : "EQ"), qidx
);
9473 /* create the Cq first */
9474 rc
= lpfc_cq_create(phba
, cq
, eq
,
9475 (qtype
== LPFC_MBOX
) ? LPFC_MCQ
: LPFC_WCQ
, qtype
);
9477 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9478 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9479 qidx
, (uint32_t)rc
);
9483 if (qtype
!= LPFC_MBOX
) {
9484 /* Setup cq_map for fast lookup */
9486 *cq_map
= cq
->queue_id
;
9488 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9489 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9490 qidx
, cq
->queue_id
, qidx
, eq
->queue_id
);
9493 rc
= lpfc_wq_create(phba
, wq
, cq
, qtype
);
9495 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9496 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9497 qidx
, (uint32_t)rc
);
9498 /* no need to tear down cq - caller will do so */
9502 /* Bind this CQ/WQ to the NVME ring */
9504 pring
->sli
.sli4
.wqp
= (void *)wq
;
9507 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9508 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9509 qidx
, wq
->queue_id
, wq
->assoc_qid
, qidx
, cq
->queue_id
);
9511 rc
= lpfc_mq_create(phba
, wq
, cq
, LPFC_MBOX
);
9513 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9514 "0539 Failed setup of slow-path MQ: "
9516 /* no need to tear down cq - caller will do so */
9520 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9521 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9522 phba
->sli4_hba
.mbx_wq
->queue_id
,
9523 phba
->sli4_hba
.mbx_cq
->queue_id
);
/**
 * lpfc_setup_cq_lookup - Setup the CQ lookup table
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will populate the cq_lookup table by all
 * available CQ queue_id's.
 **/
static void
lpfc_setup_cq_lookup(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *childq;
	int qidx;

	memset(phba->sli4_hba.cq_lookup, 0,
	       (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
	/* Loop thru all IRQ vectors */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		/* Loop through all CQs associated with that EQ */
		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id > phba->sli4_hba.cq_max)
				continue;
			if (childq->subtype == LPFC_IO)
				phba->sli4_hba.cq_lookup[childq->queue_id] =
					childq;
		}
	}
}
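/*
 * Illustrative, standalone sketch (not part of the driver): the kind of O(1)
 * lookup that the cq_lookup table built above enables.  An array indexed by
 * completion-queue id maps directly to the queue object, so a CQ id taken
 * from an event-queue entry can be resolved without walking the EQ's child
 * list.  The structure and ids here are assumed examples.
 */
#include <stdio.h>
#include <string.h>

struct ex_queue {
	int queue_id;
	const char *name;
};

int main(void)
{
	struct ex_queue io_cq = { 17, "io_cq[0]" };
	struct ex_queue *cq_lookup[32];

	memset(cq_lookup, 0, sizeof(cq_lookup));
	cq_lookup[io_cq.queue_id] = &io_cq;	/* populate, as above */

	/* fast path: resolve a CQ id reported in an event queue entry */
	printf("cqid 17 -> %s\n", cq_lookup[17] ? cq_lookup[17]->name : "?");
	return 0;
}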
9562 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9563 * @phba: pointer to lpfc hba data structure.
9565 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
9570 * -ENOMEM - No available memory
9571 * -EIO - The mailbox failed to complete successfully.
9574 lpfc_sli4_queue_setup(struct lpfc_hba
*phba
)
9576 uint32_t shdr_status
, shdr_add_status
;
9577 union lpfc_sli4_cfg_shdr
*shdr
;
9578 struct lpfc_vector_map_info
*cpup
;
9579 struct lpfc_sli4_hdw_queue
*qp
;
9580 LPFC_MBOXQ_t
*mboxq
;
9582 uint32_t length
, usdelay
;
9585 /* Check for dual-ULP support */
9586 mboxq
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
9588 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9589 "3249 Unable to allocate memory for "
9590 "QUERY_FW_CFG mailbox command\n");
9593 length
= (sizeof(struct lpfc_mbx_query_fw_config
) -
9594 sizeof(struct lpfc_sli4_cfg_mhdr
));
9595 lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_COMMON
,
9596 LPFC_MBOX_OPCODE_QUERY_FW_CFG
,
9597 length
, LPFC_SLI4_MBX_EMBED
);
9599 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
9601 shdr
= (union lpfc_sli4_cfg_shdr
*)
9602 &mboxq
->u
.mqe
.un
.sli4_config
.header
.cfg_shdr
;
9603 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
9604 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
9605 if (shdr_status
|| shdr_add_status
|| rc
) {
9606 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9607 "3250 QUERY_FW_CFG mailbox failed with status "
9608 "x%x add_status x%x, mbx status x%x\n",
9609 shdr_status
, shdr_add_status
, rc
);
9610 if (rc
!= MBX_TIMEOUT
)
9611 mempool_free(mboxq
, phba
->mbox_mem_pool
);
9616 phba
->sli4_hba
.fw_func_mode
=
9617 mboxq
->u
.mqe
.un
.query_fw_cfg
.rsp
.function_mode
;
9618 phba
->sli4_hba
.ulp0_mode
= mboxq
->u
.mqe
.un
.query_fw_cfg
.rsp
.ulp0_mode
;
9619 phba
->sli4_hba
.ulp1_mode
= mboxq
->u
.mqe
.un
.query_fw_cfg
.rsp
.ulp1_mode
;
9620 phba
->sli4_hba
.physical_port
=
9621 mboxq
->u
.mqe
.un
.query_fw_cfg
.rsp
.physical_port
;
9622 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9623 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9624 "ulp1_mode:x%x\n", phba
->sli4_hba
.fw_func_mode
,
9625 phba
->sli4_hba
.ulp0_mode
, phba
->sli4_hba
.ulp1_mode
);
9627 if (rc
!= MBX_TIMEOUT
)
9628 mempool_free(mboxq
, phba
->mbox_mem_pool
);
9631 * Set up HBA Event Queues (EQs)
9633 qp
= phba
->sli4_hba
.hdwq
;
9635 /* Set up HBA event queue */
9637 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9638 "3147 Fast-path EQs not allocated\n");
9643 /* Loop thru all IRQ vectors */
9644 for (qidx
= 0; qidx
< phba
->cfg_irq_chann
; qidx
++) {
9645 /* Create HBA Event Queues (EQs) in order */
9646 for_each_present_cpu(cpu
) {
9647 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
9649 /* Look for the CPU thats using that vector with
9650 * LPFC_CPU_FIRST_IRQ set.
9652 if (!(cpup
->flag
& LPFC_CPU_FIRST_IRQ
))
9654 if (qidx
!= cpup
->eq
)
9657 /* Create an EQ for that vector */
9658 rc
= lpfc_eq_create(phba
, qp
[cpup
->hdwq
].hba_eq
,
9659 phba
->cfg_fcp_imax
);
9661 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9662 "0523 Failed setup of fast-path"
9663 " EQ (%d), rc = 0x%x\n",
9664 cpup
->eq
, (uint32_t)rc
);
9668 /* Save the EQ for that vector in the hba_eq_hdl */
9669 phba
->sli4_hba
.hba_eq_hdl
[cpup
->eq
].eq
=
9670 qp
[cpup
->hdwq
].hba_eq
;
9672 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9673 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9675 qp
[cpup
->hdwq
].hba_eq
->queue_id
);
9679 /* Loop thru all Hardware Queues */
9680 for (qidx
= 0; qidx
< phba
->cfg_hdw_queue
; qidx
++) {
9681 cpu
= lpfc_find_cpu_handle(phba
, qidx
, LPFC_FIND_BY_HDWQ
);
9682 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
9684 /* Create the CQ/WQ corresponding to the Hardware Queue */
9685 rc
= lpfc_create_wq_cq(phba
,
9686 phba
->sli4_hba
.hdwq
[cpup
->hdwq
].hba_eq
,
9689 &phba
->sli4_hba
.hdwq
[qidx
].io_cq_map
,
9693 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9694 "0535 Failed to setup fastpath "
9695 "IO WQ/CQ (%d), rc = 0x%x\n",
9696 qidx
, (uint32_t)rc
);
9702 * Set up Slow Path Complete Queues (CQs)
9705 /* Set up slow-path MBOX CQ/MQ */
9707 if (!phba
->sli4_hba
.mbx_cq
|| !phba
->sli4_hba
.mbx_wq
) {
9708 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9709 "0528 %s not allocated\n",
9710 phba
->sli4_hba
.mbx_cq
?
9711 "Mailbox WQ" : "Mailbox CQ");
9716 rc
= lpfc_create_wq_cq(phba
, qp
[0].hba_eq
,
9717 phba
->sli4_hba
.mbx_cq
,
9718 phba
->sli4_hba
.mbx_wq
,
9719 NULL
, 0, LPFC_MBOX
);
9721 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9722 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9726 if (phba
->nvmet_support
) {
9727 if (!phba
->sli4_hba
.nvmet_cqset
) {
9728 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9729 "3165 Fast-path NVME CQ Set "
9730 "array not allocated\n");
9734 if (phba
->cfg_nvmet_mrq
> 1) {
9735 rc
= lpfc_cq_create_set(phba
,
9736 phba
->sli4_hba
.nvmet_cqset
,
9738 LPFC_WCQ
, LPFC_NVMET
);
9740 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9741 "3164 Failed setup of NVME CQ "
9747 /* Set up NVMET Receive Complete Queue */
9748 rc
= lpfc_cq_create(phba
, phba
->sli4_hba
.nvmet_cqset
[0],
9750 LPFC_WCQ
, LPFC_NVMET
);
9752 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9753 "6089 Failed setup NVMET CQ: "
9754 "rc = 0x%x\n", (uint32_t)rc
);
9757 phba
->sli4_hba
.nvmet_cqset
[0]->chann
= 0;
9759 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9760 "6090 NVMET CQ setup: cq-id=%d, "
9761 "parent eq-id=%d\n",
9762 phba
->sli4_hba
.nvmet_cqset
[0]->queue_id
,
9763 qp
[0].hba_eq
->queue_id
);
9767 /* Set up slow-path ELS WQ/CQ */
9768 if (!phba
->sli4_hba
.els_cq
|| !phba
->sli4_hba
.els_wq
) {
9769 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9770 "0530 ELS %s not allocated\n",
9771 phba
->sli4_hba
.els_cq
? "WQ" : "CQ");
9775 rc
= lpfc_create_wq_cq(phba
, qp
[0].hba_eq
,
9776 phba
->sli4_hba
.els_cq
,
9777 phba
->sli4_hba
.els_wq
,
9780 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9781 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9785 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9786 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9787 phba
->sli4_hba
.els_wq
->queue_id
,
9788 phba
->sli4_hba
.els_cq
->queue_id
);
9790 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
9791 /* Set up NVME LS Complete Queue */
9792 if (!phba
->sli4_hba
.nvmels_cq
|| !phba
->sli4_hba
.nvmels_wq
) {
9793 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9794 "6091 LS %s not allocated\n",
9795 phba
->sli4_hba
.nvmels_cq
? "WQ" : "CQ");
9799 rc
= lpfc_create_wq_cq(phba
, qp
[0].hba_eq
,
9800 phba
->sli4_hba
.nvmels_cq
,
9801 phba
->sli4_hba
.nvmels_wq
,
9802 NULL
, 0, LPFC_NVME_LS
);
9804 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9805 "0526 Failed setup of NVVME LS WQ/CQ: "
9806 "rc = 0x%x\n", (uint32_t)rc
);
9810 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9811 "6096 ELS WQ setup: wq-id=%d, "
9812 "parent cq-id=%d\n",
9813 phba
->sli4_hba
.nvmels_wq
->queue_id
,
9814 phba
->sli4_hba
.nvmels_cq
->queue_id
);
	/*
	 * Create NVMET Receive Queue (RQ)
	 */
	if (phba->nvmet_support) {
		if ((!phba->sli4_hba.nvmet_cqset) ||
		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
		    (!phba->sli4_hba.nvmet_mrq_data)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6130 MRQ CQ Queues not "
					"allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_mrq_create(phba,
					     phba->sli4_hba.nvmet_mrq_hdr,
					     phba->sli4_hba.nvmet_mrq_data,
					     phba->sli4_hba.nvmet_cqset,
					     LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6098 Failed setup of NVMET "
						"MRQ: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}
		} else {
			rc = lpfc_rq_create(phba,
					    phba->sli4_hba.nvmet_mrq_hdr[0],
					    phba->sli4_hba.nvmet_mrq_data[0],
					    phba->sli4_hba.nvmet_cqset[0],
					    LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6057 Failed setup of NVMET "
						"Receive Queue: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

			lpfc_printf_log(
				phba, KERN_INFO, LOG_INIT,
				"6099 NVMET RQ setup: hdr-rq-id=%d, "
				"dat-rq-id=%d parent cq-id=%d\n",
				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
				phba->sli4_hba.nvmet_cqset[0]->queue_id);
		}
	}

	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_fcp_imax)
		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
	else
		usdelay = 0;

	for (qidx = 0; qidx < phba->cfg_irq_chann;
	     qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
					 usdelay);

	if (phba->sli4_hba.cq_max) {
		kfree(phba->sli4_hba.cq_lookup);
		phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
			sizeof(struct lpfc_queue *), GFP_KERNEL);
		if (!phba->sli4_hba.cq_lookup) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0549 Failed setup of CQ Lookup table: "
					"size 0x%x\n", phba->sli4_hba.cq_max);
			rc = -ENOMEM;
			goto out_destroy;
		}
		lpfc_setup_cq_lookup(phba);
	}
	return 0;

out_destroy:
	lpfc_sli4_queue_unset(phba);
out_error:
	return rc;
}
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;
	int qidx;

	/* Unset mailbox command work queue */
	if (phba->sli4_hba.mbx_wq)
		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Unset NVME LS work queue */
	if (phba->sli4_hba.nvmels_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);

	/* Unset ELS work queue */
	if (phba->sli4_hba.els_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);

	/* Unset unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq)
		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
				phba->sli4_hba.dat_rq);

	/* Unset mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);

	/* Unset ELS complete queue */
	if (phba->sli4_hba.els_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);

	/* Unset NVME LS complete queue */
	if (phba->sli4_hba.nvmels_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);

	if (phba->nvmet_support) {
		/* Unset NVMET MRQ queue */
		if (phba->sli4_hba.nvmet_mrq_hdr) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_rq_destroy(
					phba,
					phba->sli4_hba.nvmet_mrq_hdr[qidx],
					phba->sli4_hba.nvmet_mrq_data[qidx]);
		}

		/* Unset NVMET CQ Set complete queue */
		if (phba->sli4_hba.nvmet_cqset) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_cq_destroy(
					phba, phba->sli4_hba.nvmet_cqset[qidx]);
		}
	}

	/* Unset fast-path SLI4 queues */
	if (phba->sli4_hba.hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			/* Destroy the CQ/WQ corresponding to Hardware Queue */
			qp = &phba->sli4_hba.hdwq[qidx];
			lpfc_wq_destroy(phba, qp->io_wq);
			lpfc_cq_destroy(phba, qp->io_cq);
		}
		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			/* Destroy the EQ corresponding to the IRQ vector */
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			lpfc_eq_destroy(phba, eq);
		}
	}

	kfree(phba->sli4_hba.cq_lookup);
	phba->sli4_hba.cq_lookup = NULL;
	phba->sli4_hba.cq_max = 0;
}
/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * CQE. For now, this pool is used for the interrupt service routine to queue
 * the following HBA completion queue events for the worker thread to process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}
/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that, it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}
/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}
/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}
/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}
/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to release all the pending completion-queue events back
 * into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cq_event_list);
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */

	/* Pending ELS XRI abort events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);

	/* Pending async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	while (!list_empty(&cq_event_list)) {
		list_remove_head(&cq_event_list, cq_event,
				 struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
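
/*
 * Illustrative note (not in the original source): the pool routines above
 * are intended to pair up as "allocate in interrupt context, release from
 * the worker thread". A minimal usage sketch, assuming a valid phba and a
 * caller that copies the CQE payload before queueing:
 *
 *	struct lpfc_cq_event *cq_event;
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);	// ISR side, takes hbalock
 *	if (cq_event) {
 *		// copy the CQE and add cq_event to a slow-path work list
 *	}
 *	// ... later, worker thread side, after processing:
 *	lpfc_sli4_cq_event_release(phba, cq_event);	// back to the free pool
 */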
/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It will destroy
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk;
	uint32_t port_reset = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
wait:
		/*
		 * Poll the Port Status Register and wait for RDY for
		 * up to 30 seconds. If the port doesn't respond, treat
		 * it as an error.
		 */
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}

		if (!port_reset) {
			/*
			 * Reset the port now
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			rc = -ENODEV;
			goto out;
		}
		break;

	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3317 HBA not functional: IP Reset Failed "
				"try: echo fw_reset > board_mode\n");
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error;
	uint32_t if_type;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return -ENODEV;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return -ENODEV;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * address.
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			return -ENODEV;
		}
		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			return -ENODEV;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			return -ENODEV;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
		if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
			/*
			 * Map SLI4 if type 0 HBA Control Register base to a
			 * kernel virtual address and setup the registers.
			 */
			phba->pci_bar1_map = pci_resource_start(pdev,
								PCI_64BIT_BAR2);
			bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
			phba->sli4_hba.ctrl_regs_memmap_p =
				ioremap(phba->pci_bar1_map,
					bar1map_len);
			if (!phba->sli4_hba.ctrl_regs_memmap_p) {
				dev_err(&pdev->dev,
					"ioremap failed for SLI4 HBA "
					"control registers.\n");
				error = -ENOMEM;
				goto out_iounmap_conf;
			}
			phba->pci_bar2_memmap_p =
					phba->sli4_hba.ctrl_regs_memmap_p;
			lpfc_sli4_bar1_register_memmap(phba, if_type);
		} else {
			error = -ENOMEM;
			goto out_iounmap_conf;
		}
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
		/*
		 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
		phba->sli4_hba.drbl_regs_memmap_p =
				ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_err(&pdev->dev,
			   "ioremap failed for SLI4 HBA doorbell registers.\n");
			error = -ENOMEM;
			goto out_iounmap_conf;
		}
		phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
		lpfc_sli4_bar1_register_memmap(phba, if_type);
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
		if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
			/*
			 * Map SLI4 if type 0 HBA Doorbell Register base to
			 * a kernel virtual address and setup the registers.
			 */
			phba->pci_bar2_map = pci_resource_start(pdev,
								PCI_64BIT_BAR4);
			bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
			phba->sli4_hba.drbl_regs_memmap_p =
				ioremap(phba->pci_bar2_map,
					bar2map_len);
			if (!phba->sli4_hba.drbl_regs_memmap_p) {
				dev_err(&pdev->dev,
					"ioremap failed for SLI4 HBA"
					" doorbell registers.\n");
				error = -ENOMEM;
				goto out_iounmap_ctrl;
			}
			phba->pci_bar4_memmap_p =
					phba->sli4_hba.drbl_regs_memmap_p;
			error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
			if (error)
				goto out_iounmap_all;
		} else {
			error = -ENOMEM;
			goto out_iounmap_all;
		}
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
	    pci_resource_start(pdev, PCI_64BIT_BAR4)) {
		/*
		 * Map SLI4 if type 6 HBA DPP Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.dpp_regs_memmap_p =
				ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.dpp_regs_memmap_p) {
			dev_err(&pdev->dev,
			   "ioremap failed for SLI4 HBA dpp registers.\n");
			error = -ENOMEM;
			goto out_iounmap_ctrl;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
	}

	/* Set up the EQ/CQ register handling functions now */
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
		break;
	default:
		break;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);

	return error;
}
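
/*
 * Illustrative note (not in the original source): the BAR layout handled
 * above differs by interface type. Roughly, BAR0 always maps the SLI4
 * config registers; for if_type 0, BAR2 maps the control registers and
 * BAR4 the doorbells; for if_type 6, BAR2 maps the doorbells and BAR4 the
 * optional DPP region. The unwind labels release whatever was mapped
 * before the failure point, in reverse order of mapping.
 */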
/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		if (phba->sli4_hba.dpp_regs_memmap_p)
			iounmap(phba->sli4_hba.dpp_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	rc = pci_alloc_irq_vectors(phba->pcidev,
			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}

	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler, which
 * is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}
/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured to the driver, the driver
 * will try to fall back from the configured interrupt mode to an interrupt
 * mode which is supported by the platform, kernel, and device in the order
 * of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}
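
/*
 * Illustrative note (not in the original source): a minimal sketch of how a
 * probe path is expected to consume the fallback logic above, assuming a
 * cfg_use_msi-style module setting selects the preferred mode:
 *
 *	uint32_t intr_mode;
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR) {
 *		// neither MSI-X, MSI nor INTx could be enabled; fail the probe
 *	}
 */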
/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	int nr_irqs, i;

	if (phba->intr_type == MSIX)
		nr_irqs = LPFC_MSIX_VECTORS;
	else
		nr_irqs = 1;

	for (i = 0; i < nr_irqs; i++)
		free_irq(pci_irq_vector(phba->pcidev, i), phba);
	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
/**
 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ vector index or Hardware Queue index
 * @match: LPFC_FIND_BY_EQ = match by EQ
 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
 * Return the CPU that matches the selection criteria
 **/
static uint16_t
lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
{
	struct lpfc_vector_map_info *cpup;
	int cpu;

	/* Loop through all CPUs */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* If we are matching by EQ, there may be multiple CPUs
		 * using the same vector, so select the one with
		 * LPFC_CPU_FIRST_IRQ set.
		 */
		if ((match == LPFC_FIND_BY_EQ) &&
		    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
		    (cpup->eq == id))
			return cpu;

		/* If matching by HDWQ, select the first CPU that matches */
		if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
			return cpu;
	}
	return 0;
}
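
/*
 * Illustrative note (not in the original source): a minimal usage sketch,
 * assuming a valid hardware queue index hdwq_no:
 *
 *	unsigned int cpu;
 *
 *	cpu = lpfc_find_cpu_handle(phba, hdwq_no, LPFC_FIND_BY_HDWQ);
 *	// cpu is the first present CPU mapped to that hardware queue,
 *	// or 0 if no cpu_map entry matches.
 */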
/**
 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU map index
 * @phys_id: CPU package physical id
 * @core_id: CPU core id
 **/
static int
lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
		uint16_t phys_id, uint16_t core_id)
{
	struct lpfc_vector_map_info *cpup;
	int idx;

	for_each_present_cpu(idx) {
		cpup = &phba->sli4_hba.cpu_map[idx];
		/* Does the cpup match the one we are looking for */
		if ((cpup->phys_id == phys_id) &&
		    (cpup->core_id == core_id) &&
		    (cpu != idx))
			return 1;
	}
	return 0;
}
/**
 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: index for eq and irq vector
 * @flag: flags to set for vector_map structure
 * @cpu: cpu used to index vector_map structure
 *
 * The routine assigns eq info into vector_map structure
 **/
static inline void
lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
			unsigned int cpu)
{
	struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);

	cpup->eq = eqidx;
	cpup->flag |= flag;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
			cpu, eqhdl->irq, cpup->eq, cpup->flag);
}
/**
 * lpfc_cpu_map_array_init - Initialize cpu_map structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the cpu_map array structure
 **/
static void
lpfc_cpu_map_array_init(struct lpfc_hba *phba)
{
	struct lpfc_vector_map_info *cpup;
	struct lpfc_eq_intr_info *eqi;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
		cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
		cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
		cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
		cpup->eq = LPFC_VECTOR_MAP_EMPTY;
		cpup->flag = 0;
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
		INIT_LIST_HEAD(&eqi->list);
	}
}
/**
 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the hba_eq_hdl array structure
 **/
static void
lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
{
	struct lpfc_hba_eq_hdl *eqhdl;
	int i;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		eqhdl = lpfc_get_eq_hdl(i);
		eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
		eqhdl->phba = phba;
	}
}
/**
 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of msix vectors allocated.
 *
 * The routine will figure out the CPU affinity assignment for every
 * MSI-X vector allocated for the HBA.
 * In addition, the CPU to IO channel mapping will be calculated
 * and the phba->sli4_hba.cpu_map array will reflect this.
 **/
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
	int max_phys_id, min_phys_id;
	int max_core_id, min_core_id;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *new_cpup;
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo;
#endif
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	struct lpfc_hdwq_stat *c_stat;
#endif

	max_phys_id = 0;
	min_phys_id = LPFC_VECTOR_MAP_EMPTY;
	max_core_id = 0;
	min_core_id = LPFC_VECTOR_MAP_EMPTY;

	/* Update CPU map with physical id and core id of each CPU */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
			cpup->flag |= LPFC_CPU_MAP_HYPER;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = cpu;
#endif

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3328 CPU %d physid %d coreid %d flag x%x\n",
				cpu, cpup->phys_id, cpup->core_id, cpup->flag);

		if (cpup->phys_id > max_phys_id)
			max_phys_id = cpup->phys_id;
		if (cpup->phys_id < min_phys_id)
			min_phys_id = cpup->phys_id;

		if (cpup->core_id > max_core_id)
			max_core_id = cpup->core_id;
		if (cpup->core_id < min_core_id)
			min_core_id = cpup->core_id;
	}

	/* After looking at each irq vector assigned to this pcidev, it's
	 * possible to see that not ALL CPUs have been accounted for.
	 * Next we will set any unassigned (unaffinitized) cpu map
	 * entries to an IRQ on the same phys_id.
	 */
	first_cpu = cpumask_first(cpu_present_mask);
	start_cpu = first_cpu;

	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this CPU entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark CPU as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on the SAME
			 * phys_id as cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
			new_cpu = start_cpu;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
				    (new_cpup->phys_id == cpup->phys_id))
					goto found_same;
				new_cpu = cpumask_next(
					new_cpu, cpu_present_mask);
				if (new_cpu == nr_cpumask_bits)
					new_cpu = first_cpu;
			}
			/* At this point, we leave the CPU as unassigned */
			continue;
found_same:
			/* We found a matching phys_id, so copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3337 Set Affinity: CPU %d "
					"eq %d from peer cpu %d same "
					"phys_id (%d)\n",
					cpu, cpup->eq, new_cpu,
					cpup->phys_id);
		}
	}

	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
	start_cpu = first_cpu;

	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark it as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on ANY phys_id
			 * as the cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
			new_cpu = start_cpu;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
					goto found_any;
				new_cpu = cpumask_next(
					new_cpu, cpu_present_mask);
				if (new_cpu == nr_cpumask_bits)
					new_cpu = first_cpu;
			}
			/* We should never leave an entry unassigned */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3339 Set Affinity: CPU %d "
					"eq %d UNASSIGNED\n",
					cpup->hdwq, cpup->eq);
			continue;
found_any:
			/* We found an available entry, copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3338 Set Affinity: CPU %d "
					"eq %d from peer cpu %d (%d/%d)\n",
					cpu, cpup->eq, new_cpu,
					new_cpup->phys_id, new_cpup->core_id);
		}
	}

	/* Assign hdwq indices that are unique across all cpus in the map
	 * that are also FIRST_CPUs.
	 */
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Only FIRST IRQs get a hdwq index assignment. */
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
		cpup->hdwq = idx;
		idx++;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3333 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flg x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}
	/* Associate a hdwq with each cpu_map entry
	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
	 * hardware queues than CPUs. For that case we will just round-robin
	 * the available hardware queues as they get assigned to CPUs.
	 * The next_idx is the idx from the FIRST_CPU loop above to account
	 * for irq_chann < hdwq. The idx is used for round-robin assignments
	 * and needs to start at 0.
	 */
	next_idx = idx;
	start_cpu = 0;
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* FIRST cpus are already mapped. */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
		 * of the unassigned cpus to the next idx so that all
		 * hdw queues are fully utilized.
		 */
		if (next_idx < phba->cfg_hdw_queue) {
			cpup->hdwq = next_idx;
			next_idx++;
			continue;
		}

		/* Not a First CPU and all hdw_queues are used. Reuse a
		 * Hardware Queue for another CPU, so be smart about it
		 * and pick one that has its IRQ/EQ mapped to the same phys_id
		 * (CPU package) and core_id.
		 */
		new_cpu = start_cpu;
		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
			    new_cpup->phys_id == cpup->phys_id &&
			    new_cpup->core_id == cpup->core_id) {
				goto found_hdwq;
			}
			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (new_cpu == nr_cpumask_bits)
				new_cpu = first_cpu;
		}

		/* If we can't match both phys_id and core_id,
		 * settle for just a phys_id match.
		 */
		new_cpu = start_cpu;
		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
			    new_cpup->phys_id == cpup->phys_id)
				goto found_hdwq;

			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (new_cpu == nr_cpumask_bits)
				new_cpu = first_cpu;
		}

		/* Otherwise just round robin on cfg_hdw_queue */
		cpup->hdwq = idx % phba->cfg_hdw_queue;
		idx++;
		goto logit;
found_hdwq:
		/* We found an available entry, copy the IRQ info */
		start_cpu = cpumask_next(new_cpu, cpu_present_mask);
		if (start_cpu == nr_cpumask_bits)
			start_cpu = first_cpu;
		cpup->hdwq = new_cpup->hdwq;
logit:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3335 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flg x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}

	/*
	 * Initialize the cpu_map slots for not-present cpus in case
	 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
	 */
	idx = 0;
	for_each_possible_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
		c_stat->hdwq_no = cpup->hdwq;
#endif
		if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
			continue;

		cpup->hdwq = idx++ % phba->cfg_hdw_queue;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		c_stat->hdwq_no = cpup->hdwq;
#endif
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3340 Set Affinity: not present "
				"CPU %d hdwq %d\n",
				cpu, cpup->hdwq);
	}

	/* The cpu_map array will be used later during initialization
	 * when EQ / CQ / WQs are allocated and configured.
	 */
}
/**
 * lpfc_cpuhp_get_eq
 *
 * @phba:   pointer to lpfc hba data structure.
 * @cpu:    cpu going offline
 * @eqlist: eq list to append to
 **/
static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
		  struct list_head *eqlist)
{
	const struct cpumask *maskp;
	struct lpfc_queue *eq;
	struct cpumask *tmp;
	u16 idx;

	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/*
		 * If the irq is not affinitized to the cpu going offline,
		 * then we don't need to poll the eq attached to it.
		 */
		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
			continue;
		/* Get the cpus that are online and are affini-
		 * tized to this irq vector. If the count is
		 * more than 1 then cpuhp is not going to shut-
		 * down this vector. Since this cpu has not
		 * gone offline yet, we need >1.
		 */
		cpumask_and(tmp, maskp, cpu_online_mask);
		if (cpumask_weight(tmp) > 1)
			continue;

		/* Now that we have an irq to shutdown, get the eq
		 * mapped to this irq. Note: multiple hdwq's in
		 * the software can share an eq, but eventually
		 * only eq will be mapped to this vector
		 */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		list_add(&eq->_poll_list, eqlist);
	}
	kfree(tmp);
	return 0;
}
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
					    &phba->cpuhp);
	/*
	 * unregistering the instance doesn't stop the polling
	 * timer. Wait for the poll timer to retire.
	 */
	del_timer_sync(&phba->cpuhp_poll_timer);
}

static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	__lpfc_cpuhp_remove(phba);
}
static void lpfc_cpuhp_add(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
					 &phba->cpuhp);
}
static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
{
	if (phba->pport->load_flag & FC_UNLOADING) {
		*retval = -EAGAIN;
		return true;
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		*retval = 0;
		return true;
	}

	/* proceed with the hotplug */
	return false;
}
/**
 * lpfc_irq_set_aff - set IRQ affinity
 * @eqhdl: EQ handle
 * @cpu: cpu to set affinity
 **/
static inline void
lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
{
	cpumask_clear(&eqhdl->aff_mask);
	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
	irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
}
/**
 * lpfc_irq_clear_aff - clear IRQ affinity
 * @eqhdl: EQ handle
 **/
static inline void
lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
	cpumask_clear(&eqhdl->aff_mask);
	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
}
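
/*
 * Illustrative note (not in the original source): lpfc_irq_set_aff() pins a
 * vector to one cpu and marks it IRQ_NO_BALANCING so irqbalance leaves it
 * alone; lpfc_irq_clear_aff() undoes both so the kernel may migrate the
 * vector again. A minimal sketch of the pairing, assuming a valid eqhdl:
 *
 *	lpfc_irq_set_aff(eqhdl, target_cpu);	// pin while target_cpu is online
 *	...
 *	lpfc_irq_clear_aff(eqhdl);		// release when it goes offline
 */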
/**
 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
 * @phba: pointer to HBA context object.
 * @cpu: cpu going offline/online
 * @offline: true, cpu is going offline. false, cpu is coming online.
 *
 * If cpu is going offline, we'll try our best effort to find the next
 * online cpu on the phba's original_mask and migrate all offlining IRQ
 * affinities.
 *
 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
 *
 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
 *       PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
 *
 **/
static void
lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
{
	struct lpfc_vector_map_info *cpup;
	struct cpumask *aff_mask;
	unsigned int cpu_select, cpu_next, idx;
	const struct cpumask *orig_mask;

	if (phba->irq_chann_mode == NORMAL_MODE)
		return;

	orig_mask = &phba->sli4_hba.irq_aff_mask;

	if (!cpumask_test_cpu(cpu, orig_mask))
		return;

	cpup = &phba->sli4_hba.cpu_map[cpu];

	if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
		return;

	if (offline) {
		/* Find next online CPU on original mask */
		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);

		/* Found a valid CPU */
		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
			/* Go through each eqhdl and ensure offlining
			 * cpu aff_mask is migrated
			 */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				aff_mask = lpfc_get_aff_mask(idx);

				/* Migrate affinity */
				if (cpumask_test_cpu(cpu, aff_mask))
					lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
							 cpu_select);
			}
		} else {
			/* Rely on irqbalance if no online CPUs left on NUMA */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++)
				lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
		}
	} else {
		/* Migrate affinity back to this CPU */
		lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
	}
}
static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	LIST_HEAD(eqlist);
	int retval;

	WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
	if (!phba)
		return 0;

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, true);

	retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
	if (retval)
		return retval;

	/* start polling on these eq's */
	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
		list_del_init(&eq->_poll_list);
		lpfc_sli4_start_polling(eq);
	}

	return 0;
}
static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	unsigned int n;
	int retval;

	WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
	if (!phba)
		return 0;

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, false);

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
		n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
		if (n == cpu)
			lpfc_sli4_stop_polling(eq);
	}

	return 0;
}
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
 * to cpus on the system.
 *
 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
 * the number of cpus on the same numa node as this adapter. The vectors are
 * allocated without requesting OS affinity mapping. A vector will be
 * allocated and assigned to each online and offline cpu. If the cpu is
 * online, then affinity will be set to that cpu. If the cpu is offline, then
 * affinity will be set to the nearest peer cpu within the numa node that is
 * online. If there are no online cpus within the numa node, affinity is not
 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
 * is consistent with the way cpu online/offline is handled when cfg_irq_numa
 * is enabled.
 *
 * If numa mode is not enabled and there is more than 1 vector allocated, then
 * the driver relies on the managed irq interface where the OS assigns vector
 * to cpu affinity. The driver will then use that affinity mapping to set up
 * its cpu mapping table.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;
	char *name;
	const struct cpumask *aff_mask = NULL;
	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_hba_eq_hdl *eqhdl;
	const struct cpumask *maskp;
	unsigned int flags = PCI_IRQ_MSIX;

	/* Set up MSI-X multi-message vectors */
	vectors = phba->cfg_irq_chann;

	if (phba->irq_chann_mode != NORMAL_MODE)
		aff_mask = &phba->sli4_hba.irq_aff_mask;

	if (aff_mask) {
		cpu_cnt = cpumask_weight(aff_mask);
		vectors = min(phba->cfg_irq_chann, cpu_cnt);

		/* cpu: iterates over aff_mask including offline or online
		 * cpu_select: iterates over online aff_mask to set affinity
		 */
		cpu = cpumask_first(aff_mask);
		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
	} else {
		flags |= PCI_IRQ_AFFINITY;
	}

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	vectors = rc;

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		name = eqhdl->handler_name;
		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
			 LPFC_DRIVER_HANDLER_NAME"%d", index);

		eqhdl->idx = index;
		rc = request_irq(pci_irq_vector(phba->pcidev, index),
			 &lpfc_sli4_hba_intr_handler, 0,
			 name, eqhdl);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}

		eqhdl->irq = pci_irq_vector(phba->pcidev, index);

		if (aff_mask) {
			/* If found a neighboring online cpu, set affinity */
			if (cpu_select < nr_cpu_ids)
				lpfc_irq_set_aff(eqhdl, cpu_select);

			/* Assign EQ to cpu_map */
			lpfc_assign_eq_map_info(phba, index,
						LPFC_CPU_FIRST_IRQ,
						cpu);

			/* Iterate to next offline or online cpu in aff_mask */
			cpu = cpumask_next(cpu, aff_mask);

			/* Find next online cpu in aff_mask to set affinity */
			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
		} else if (vectors == 1) {
			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
						cpu);
		} else {
			maskp = pci_irq_get_affinity(phba->pcidev, index);

			/* Loop through all CPUs associated with vector index */
			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
				cpup = &phba->sli4_hba.cpu_map[cpu];

				/* If this is the first CPU that's assigned to
				 * this vector, set LPFC_CPU_FIRST_IRQ.
				 *
				 * With certain platforms it's possible that irq
				 * vectors are affinitized to all the cpu's.
				 * This can result in each cpu_map.eq to be set
				 * to the last vector, resulting in overwrite
				 * of all the previous cpu_map.eq.  Ensure that
				 * each vector receives a place in cpu_map.
				 * Later call to lpfc_cpu_affinity_check will
				 * ensure we are nicely balanced out.
				 */
				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
					continue;
				lpfc_assign_eq_map_info(phba, index,
							LPFC_CPU_FIRST_IRQ,
							cpu);
				break;
			}
		}
	}

	if (vectors != phba->cfg_irq_chann) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->cfg_irq_chann, vectors);
		if (phba->cfg_irq_chann > vectors)
			phba->cfg_irq_chann = vectors;
	}

	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 0; index--) {
		eqhdl = lpfc_get_eq_hdl(index);
		lpfc_irq_clear_aff(eqhdl);
		irq_set_affinity_hint(eqhdl->irq, NULL);
		free_irq(eqhdl->irq, eqhdl);
	}

	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
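
/*
 * Illustrative note (not in the original source): on success every allocated
 * vector index has an eqhdl with handler_name, idx and irq filled in, and at
 * least one present CPU carries LPFC_CPU_FIRST_IRQ for that vector in
 * phba->sli4_hba.cpu_map; lpfc_cpu_affinity_check() later spreads the
 * remaining CPUs and hardware queues across those vectors.
 */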
/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
 * called to enable the MSI vector. The device driver is responsible for
 * calling request_irq() to register the MSI vector with an interrupt
 * handler, which is done in this function.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;
	unsigned int cpu;
	struct lpfc_hba_eq_hdl *eqhdl;

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
	if (rc > 0)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc ? rc : -1;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_free_irq_vectors(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	eqhdl = lpfc_get_eq_hdl(0);
	eqhdl->irq = pci_irq_vector(phba->pcidev, 0);

	cpu = cpumask_first(cpu_present_mask);
	lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);

	for (index = 0; index < phba->cfg_irq_chann; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		eqhdl->idx = index;
	}

	return 0;
}
/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured to the driver,
 * the driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device in
 * the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, idx;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			struct lpfc_hba_eq_hdl *eqhdl;
			unsigned int cpu;

			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;

			eqhdl = lpfc_get_eq_hdl(0);
			eqhdl->irq = pci_irq_vector(phba->pcidev, 0);

			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
						cpu);
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				eqhdl = lpfc_get_eq_hdl(idx);
				eqhdl->idx = idx;
			}
		}
	}
	return intr_mode;
}
/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX) {
		int index;
		struct lpfc_hba_eq_hdl *eqhdl;

		/* Free up MSI-X multi-message vectors */
		for (index = 0; index < phba->cfg_irq_chann; index++) {
			eqhdl = lpfc_get_eq_hdl(index);
			lpfc_irq_clear_aff(eqhdl);
			irq_set_affinity_hint(eqhdl->irq, NULL);
			free_irq(eqhdl->irq, eqhdl);
		}
	} else {
		free_irq(phba->pcidev->irq, phba);
	}

	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of device's XRIs exchange busy. It will check the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it will check the XRI exchange busy on outstanding FCP and ELS
 * I/Os every 30 seconds, log error message, and wait forever. Only when
 * all XRI exchange busy complete, the driver unload shall proceed with
 * invoking the function reset ioctl mailbox command to the CNA and
 * the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	int idx, ccnt;
	int wait_time = 0;
	int io_xri_cmpl = 1;
	int nvmet_xri_cmpl = 1;
	int els_xri_cmpl =
		list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Driver just aborted IOs during the hba_unset process.  Pause
	 * here to give the HBA time to complete the IO and get entries
	 * into the abts lists.
	 */
	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);

	/* Wait for NVME pending IO to flush back to transport. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_nvme_wait_for_io_drain(phba);

	ccnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
		if (!io_xri_cmpl) /* if list is NOT empty */
			ccnt++;
	}
	if (ccnt)
		io_xri_cmpl = 0;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		nvmet_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	}

	while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!nvmet_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6424 NVMET XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!io_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6100 IO XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		ccnt = 0;
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			io_xri_cmpl = list_empty(
					&qp->lpfc_abts_io_buf_list);
			if (!io_xri_cmpl) /* if list is NOT empty */
				ccnt++;
		}
		if (ccnt)
			io_xri_cmpl = 0;

		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			nvmet_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		}
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}
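
/*
 * Editor's note on the wait arithmetic above (illustrative; assumes the
 * usual lpfc definitions LPFC_XRI_EXCH_BUSY_WAIT_T1 = 10 ms,
 * LPFC_XRI_EXCH_BUSY_WAIT_T2 = 30000 ms and
 * LPFC_XRI_EXCH_BUSY_WAIT_TMO = 10000 ms):
 *
 *	initial settle delay : T1 * 5           =  50 ms
 *	fast-poll phase      : every T1 (10 ms) up to TMO (10 s)
 *	slow-poll phase      : every T2 (30 s), logging each pass, forever
 *
 * so the function only returns once every abort list is empty.
 */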

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);

	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* per-phba callback de-registration for hotplug event */
	if (phba->pport)
		lpfc_cpuhp_remove(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Disable FW logging to host memory */
	lpfc_ras_stop_fwlog(phba);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
	lpfc_sli4_queue_unset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Free RAS DMA memory */
	if (phba->ras_fwlog.ras_enabled)
		lpfc_sli4_ras_dma_free(phba);

	/* Stop the SLI4 device port */
	if (phba->pport)
		phba->pport->work_port_events = 0;
}
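
/*
 * Editor's note (illustrative): the teardown order above is deliberate --
 * interrupts are left enabled until lpfc_sli4_xri_exchange_busy_wait()
 * returns, because the EQ/CQ interrupts are what deliver the completions
 * for the aborted XRIs that the wait is watching for.  Only after the
 * abort lists drain are the vectors released and the queues destroyed.
 */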

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion.  The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}
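
/*
 * Editor's note (illustrative sketch, not driver code): a probe- or
 * online-time caller is expected to supply the mailbox and block-wait on
 * this routine, e.g.:
 *
 *	LPFC_MBOXQ_t *mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (mboxq) {
 *		if (lpfc_pc_sli4_params_get(phba, mboxq))
 *			;	// keep the driver's default parameters
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *	}
 */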

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion.  The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	bool exp_wqcq_pages = true;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings.  The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Check for Extended Pre-Registered SGL support */
	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);

	/* Check for firmware nvme support */
	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
	      bf_get(cfg_xib, mbx_sli4_parameters));

	if (rc) {
		/* Save this to indicate the Firmware supports NVME */
		sli4_params->nvme = 1;

		/* Firmware NVME support, check driver FC4 NVME support */
		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
					"6133 Disabling NVME support: "
					"FC4 type not supported: x%x\n",
					phba->cfg_enable_fc4_type);
			goto fcponly;
		}
	} else {
		/* No firmware NVME support, check driver FC4 NVME support */
		sli4_params->nvme = 0;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
					"6101 Disabling NVME support: Not "
					"supported by firmware (%d %d) x%x\n",
					bf_get(cfg_nvme, mbx_sli4_parameters),
					bf_get(cfg_xib, mbx_sli4_parameters),
					phba->cfg_enable_fc4_type);
fcponly:
			phba->nvme_support = 0;
			phba->nvmet_support = 0;
			phba->cfg_nvmet_mrq = 0;
			phba->cfg_nvme_seg_cnt = 0;

			/* If no FC4 type support, move to just SCSI support */
			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
				return -ENODEV;
			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		}
	}

	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
	 * accommodate 512K and 1M IOs in a single nvme buf.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;

	/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
		phba->cfg_enable_pbde = 0;

	/*
	 * To support Suppress Response feature we must satisfy 3 conditions.
	 * lpfc_suppress_rsp module parameter must be set (default).
	 * In SLI4-Parameters Descriptor:
	 * Extended Inline Buffers (XIB) must be supported.
	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
	 * (double negative).
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, phba->nvme_support,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;
	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	/*
	 * Check if the SLI port supports NSLER
	 */
	if (bf_get(cfg_nsler, mbx_sli4_parameters))
		phba->nsler = 1;
	else
		phba->nsler = 0;

	/* Save PB info for use during HBA setup */
	sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
	sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
	sli4_params->mib_size = mbx_sli4_parameters->mib_size;
	sli4_params->mi_value = LPFC_DFLT_MIB_VAL;

	/* Next we check for Vendor MIB support */
	if (sli4_params->mi_ver && phba->cfg_enable_mi)
		phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6461 MIB attr %d enable %d FDMI %d buf %d:%d\n",
			sli4_params->mi_ver, phba->cfg_enable_mi,
			sli4_params->mi_value, sli4_params->mib_bde_cnt,
			sli4_params->mib_size);
	return 0;
}
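
/*
 * Editor's note (illustrative restatement): the Suppress Response test in
 * the function above reduces to a single boolean expression,
 *
 *	suppress = cfg_suppress_rsp && cfg_xib && !cfg_nosr;
 *
 * i.e. the module parameter is set, the port advertises Extended Inline
 * Buffers, and the port does NOT advertise "Suppress Response IU Not
 * Supported" (the double negative called out in the comment).
 */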

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver states that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */

	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Clean up all nodes, mailboxes and IOs. */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for the
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during resume() method call, the driver will
 * set device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 **/
static int __maybe_unused
lpfc_pci_resume_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_io_rings(phba);
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* The per-tier counts below were elided by extraction and
		 * are restored from the editor's reading of the upstream
		 * driver; verify against the source tree.
		 */
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}

/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT + NVMET IOCBs to reserve
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}

static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	int rc;

	/* Three cases:  (1) FW was not supported on the detected adapter.
	 * (2) FW update has been locked out administratively.
	 * (3) Some other error during FW update.
	 * In each case, an unmaskable message is written to the console
	 * for admin diagnosis.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
	     magic_number != MAGIC_NUMBER_G7)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure, passed as the
 *           request_firmware_nowait() context cookie.
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				rc = lpfc_log_write_firmware_error(phba, offset,
								   magic_number,
								   ftype,
								   fid,
								   fsize,
								   fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	if (rc < 0)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3024 Firmware update success: size %d.\n", rc);
}
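
/*
 * Editor's note on the chunking above (illustrative arithmetic): the image
 * is streamed through at most LPFC_MBX_WR_CONFIG_MAX_BDE DMA buffers of
 * SLI4_PAGE_SIZE bytes each, and lpfc_wr_object() advances 'offset' by the
 * amount the port accepted.  Assuming a 4 KiB SLI4 page, a hypothetical
 * 1 MiB .grp image therefore takes 256 page-sized copies, spread across
 * however many lpfc_wr_object() calls the BDE limit allows per pass.
 */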

/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware to update.
 *
 * This routine is called to perform Linux generic firmware upgrade on device
 * that supports such feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
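
/*
 * Editor's note (illustrative): the firmware image is requested by adapter
 * model name with a ".grp" suffix -- e.g. a hypothetical "LPe32000.grp" --
 * and is resolved through the kernel firmware loader's normal search path
 * (typically /lib/firmware).  INT_FW_UPGRADE uses the asynchronous
 * request_firmware_nowait() path suitable for probe time, while
 * RUN_FW_UPGRADE blocks in request_firmware() and writes immediately.
 */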

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver states that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);

	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* NVME support in FW earlier in the driver load corrects the
	 * FC4 type making a check for nvme_support unnecessary.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized.  If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	INIT_LIST_HEAD(&phba->poll_list);
	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
	 * localports are destroyed after to cleanup all transport memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and bringing
 * the device offline. Note that as the driver implements the minimum PM
 * requirements to a power-aware driver's PM support for suspend/resume -- all
 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call will be treated as SUSPEND and the driver will fully
 * reinitialize its device during resume() method call, the driver will set
 * device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 	0 - driver resumed the device
 **/
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
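
/*
 * Note: after a successful slot reset the HBA is intentionally left offline
 * (LPFC_SLI_ACTIVE is cleared above); lpfc_io_resume_s4() below performs the
 * board restart and brings the port back online once DMA is usable again.
 */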
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to
 * the proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
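
/*
 * Note: probe runs before any per-HBA state exists, so the SLI-3/SLI-4 split
 * is decided above by reading the SLI_INTF register directly from PCI config
 * space. The remove/suspend/resume/error handlers below dispatch on
 * phba->pci_dev_grp instead.
 */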
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}

	return;
}
/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS is supported by the adapter. Check the
 * function through which RAS support enablement is to be done.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
	switch (phba->pcidev->device) {
	case PCI_DEVICE_ID_LANCER_G6_FC:
	case PCI_DEVICE_ID_LANCER_G7_FC:
		phba->ras_fwlog.ras_hwsupport = true;
		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
		    phba->cfg_ras_fwlog_buffsize)
			phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
		break;
	default:
		phba->ras_fwlog.ras_hwsupport = false;
	}
}
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
			 lpfc_pci_suspend_one,
			 lpfc_pci_resume_one);

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.driver.pm	= &lpfc_pci_pm_ops_one,
	.err_handler    = &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
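
/*
 * Note: during PCI error recovery the PCI core walks lpfc_err_handler in
 * stages, typically .error_detected, then (when a reset is requested)
 * .slot_reset, and finally .resume once traffic may flow again. The
 * .shutdown hook reuses lpfc_pci_remove_one so the same teardown runs on
 * both the remove and shutdown paths.
 */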
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	pr_info(LPFC_MODULE_DESC "\n");
	pr_info(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	error = -ENOMEM;
	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
	lpfc_wqe_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);

	return error;
}
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
	unsigned int start_idx;
	unsigned int dbg_cnt;
	unsigned int temp_idx;
	int i;
	int j = 0;
	unsigned long rem_nsec;

	if (phba->cfg_log_verbose)
		return;

	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;

	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
	temp_idx = start_idx;
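	/*
	 * Illustrative example (hypothetical values, assuming DBG_LOG_SZ were
	 * e.g. 256): if dbg_log_idx is 10 and dbg_log_cnt is 5, start_idx is
	 * 10, the wrap checks below rewind start_idx to 5, and the print loop
	 * then emits entries 5 through 9, i.e. the five most recent messages,
	 * oldest first.
	 */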
	if (dbg_cnt >= DBG_LOG_SZ) {
		dbg_cnt = DBG_LOG_SZ;
		temp_idx -= 1;
	} else {
		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
		} else {
			if (start_idx < dbg_cnt)
				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
			else
				start_idx -= dbg_cnt;
		}
	}
	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
		 start_idx, temp_idx, dbg_cnt);

	for (i = 0; i < dbg_cnt; i++) {
		if ((start_idx + i) < DBG_LOG_SZ)
			temp_idx = (start_idx + i) % DBG_LOG_SZ;
		else
			temp_idx = j++;
		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
	}
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
	struct va_format vaf;

	va_start(args, fmt);
	if (unlikely(dbg_dmping)) {
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
	} else {
		idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
			DBG_LOG_SZ;

		atomic_inc(&phba->dbg_log_cnt);

		vscnprintf(phba->dbg_log[idx].log,
			   sizeof(phba->dbg_log[idx].log), fmt, args);

		phba->dbg_log[idx].t_ns = local_clock();
	}
	va_end(args);
}
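
/*
 * Note: lpfc_dbg_print() and lpfc_dmp_dbg() coordinate through the
 * dbg_log_dmping flag; while a dump is in progress new messages bypass the
 * ring buffer and go straight to dev_info(), which avoids racing with the
 * entries currently being dumped.
 */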
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);