1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
24 #include <linux/blkdev.h>
25 #include <linux/delay.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/idr.h>
28 #include <linux/interrupt.h>
29 #include <linux/module.h>
30 #include <linux/kthread.h>
31 #include <linux/pci.h>
32 #include <linux/spinlock.h>
33 #include <linux/sched/clock.h>
34 #include <linux/ctype.h>
35 #include <linux/slab.h>
36 #include <linux/firmware.h>
37 #include <linux/miscdevice.h>
38 #include <linux/percpu.h>
39 #include <linux/irq.h>
40 #include <linux/bitops.h>
41 #include <linux/crash_dump.h>
42 #include <linux/cpu.h>
43 #include <linux/cpuhotplug.h>
45 #include <scsi/scsi.h>
46 #include <scsi/scsi_device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_transport_fc.h>
49 #include <scsi/scsi_tcq.h>
50 #include <scsi/fc/fc_fs.h>
55 #include "lpfc_sli4.h"
57 #include "lpfc_disc.h"
59 #include "lpfc_scsi.h"
60 #include "lpfc_nvme.h"
61 #include "lpfc_logmsg.h"
62 #include "lpfc_crtn.h"
63 #include "lpfc_vport.h"
64 #include "lpfc_version.h"
67 static enum cpuhp_state lpfc_cpuhp_state
;
68 /* Used when mapping IRQ vectors in a driver centric manner */
69 static uint32_t lpfc_present_cpu
;
70 static bool lpfc_pldv_detect
;
72 static void __lpfc_cpuhp_remove(struct lpfc_hba
*phba
);
73 static void lpfc_cpuhp_remove(struct lpfc_hba
*phba
);
74 static void lpfc_cpuhp_add(struct lpfc_hba
*phba
);
75 static void lpfc_get_hba_model_desc(struct lpfc_hba
*, uint8_t *, uint8_t *);
76 static int lpfc_post_rcv_buf(struct lpfc_hba
*);
77 static int lpfc_sli4_queue_verify(struct lpfc_hba
*);
78 static int lpfc_create_bootstrap_mbox(struct lpfc_hba
*);
79 static int lpfc_setup_endian_order(struct lpfc_hba
*);
80 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba
*);
81 static void lpfc_free_els_sgl_list(struct lpfc_hba
*);
82 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba
*);
83 static void lpfc_init_sgl_list(struct lpfc_hba
*);
84 static int lpfc_init_active_sgl_array(struct lpfc_hba
*);
85 static void lpfc_free_active_sgl(struct lpfc_hba
*);
86 static int lpfc_hba_down_post_s3(struct lpfc_hba
*phba
);
87 static int lpfc_hba_down_post_s4(struct lpfc_hba
*phba
);
88 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba
*);
89 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba
*);
90 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba
*);
91 static void lpfc_sli4_disable_intr(struct lpfc_hba
*);
92 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba
*, uint32_t);
93 static void lpfc_sli4_oas_verify(struct lpfc_hba
*phba
);
94 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba
*, uint16_t, int);
95 static void lpfc_setup_bg(struct lpfc_hba
*, struct Scsi_Host
*);
96 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba
*);
97 static void lpfc_sli4_async_cmstat_evt(struct lpfc_hba
*phba
);
98 static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba
*phba
);
100 static struct scsi_transport_template
*lpfc_transport_template
= NULL
;
101 static struct scsi_transport_template
*lpfc_vport_transport_template
= NULL
;
102 static DEFINE_IDR(lpfc_hba_index
);
103 #define LPFC_NVMET_BUF_POST 254
104 static int lpfc_vmid_res_alloc(struct lpfc_hba
*phba
, struct lpfc_vport
*vport
);
105 static void lpfc_cgn_update_tstamp(struct lpfc_hba
*phba
, struct lpfc_cgn_ts
*ts
);
108 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
109 * @phba: pointer to lpfc hba data structure.
111 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
112 * mailbox command. It retrieves the revision information from the HBA and
113 * collects the Vital Product Data (VPD) about the HBA for preparing the
114 * configuration of the HBA.
118 * -ERESTART - requests the SLI layer to reset the HBA and try again.
119 * Any other value - indicates an error.
122 lpfc_config_port_prep(struct lpfc_hba
*phba
)
124 lpfc_vpd_t
*vp
= &phba
->vpd
;
128 char *lpfc_vpd_data
= NULL
;
130 static char licensed
[56] =
131 "key unlock for use with gnu public licensed code only\0";
132 static int init_key
= 1;
134 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
136 phba
->link_state
= LPFC_HBA_ERROR
;
141 phba
->link_state
= LPFC_INIT_MBX_CMDS
;
143 if (lpfc_is_LC_HBA(phba
->pcidev
->device
)) {
145 uint32_t *ptext
= (uint32_t *) licensed
;
147 for (i
= 0; i
< 56; i
+= sizeof (uint32_t), ptext
++)
148 *ptext
= cpu_to_be32(*ptext
);
152 lpfc_read_nv(phba
, pmb
);
153 memset((char*)mb
->un
.varRDnvp
.rsvd3
, 0,
154 sizeof (mb
->un
.varRDnvp
.rsvd3
));
155 memcpy((char*)mb
->un
.varRDnvp
.rsvd3
, licensed
,
158 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
160 if (rc
!= MBX_SUCCESS
) {
161 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
162 "0324 Config Port initialization "
163 "error, mbxCmd x%x READ_NVPARM, "
165 mb
->mbxCommand
, mb
->mbxStatus
);
166 mempool_free(pmb
, phba
->mbox_mem_pool
);
169 memcpy(phba
->wwnn
, (char *)mb
->un
.varRDnvp
.nodename
,
171 memcpy(phba
->wwpn
, (char *)mb
->un
.varRDnvp
.portname
,
176 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
177 * which was already set in lpfc_get_cfgparam()
179 phba
->sli3_options
&= (uint32_t)LPFC_SLI3_BG_ENABLED
;
181 /* Setup and issue mailbox READ REV command */
182 lpfc_read_rev(phba
, pmb
);
183 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
184 if (rc
!= MBX_SUCCESS
) {
185 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
186 "0439 Adapter failed to init, mbxCmd x%x "
187 "READ_REV, mbxStatus x%x\n",
188 mb
->mbxCommand
, mb
->mbxStatus
);
189 mempool_free( pmb
, phba
->mbox_mem_pool
);
195 * The value of rr must be 1 since the driver set the cv field to 1.
196 * This setting requires the FW to set all revision fields.
198 if (mb
->un
.varRdRev
.rr
== 0) {
200 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
201 "0440 Adapter failed to init, READ_REV has "
202 "missing revision information.\n");
203 mempool_free(pmb
, phba
->mbox_mem_pool
);
207 if (phba
->sli_rev
== 3 && !mb
->un
.varRdRev
.v3rsp
) {
208 mempool_free(pmb
, phba
->mbox_mem_pool
);
212 /* Save information as VPD data */
214 memcpy(&vp
->sli3Feat
, &mb
->un
.varRdRev
.sli3Feat
, sizeof(uint32_t));
215 vp
->rev
.sli1FwRev
= mb
->un
.varRdRev
.sli1FwRev
;
216 memcpy(vp
->rev
.sli1FwName
, (char*) mb
->un
.varRdRev
.sli1FwName
, 16);
217 vp
->rev
.sli2FwRev
= mb
->un
.varRdRev
.sli2FwRev
;
218 memcpy(vp
->rev
.sli2FwName
, (char *) mb
->un
.varRdRev
.sli2FwName
, 16);
219 vp
->rev
.biuRev
= mb
->un
.varRdRev
.biuRev
;
220 vp
->rev
.smRev
= mb
->un
.varRdRev
.smRev
;
221 vp
->rev
.smFwRev
= mb
->un
.varRdRev
.un
.smFwRev
;
222 vp
->rev
.endecRev
= mb
->un
.varRdRev
.endecRev
;
223 vp
->rev
.fcphHigh
= mb
->un
.varRdRev
.fcphHigh
;
224 vp
->rev
.fcphLow
= mb
->un
.varRdRev
.fcphLow
;
225 vp
->rev
.feaLevelHigh
= mb
->un
.varRdRev
.feaLevelHigh
;
226 vp
->rev
.feaLevelLow
= mb
->un
.varRdRev
.feaLevelLow
;
227 vp
->rev
.postKernRev
= mb
->un
.varRdRev
.postKernRev
;
228 vp
->rev
.opFwRev
= mb
->un
.varRdRev
.opFwRev
;
230 /* If the sli feature level is less then 9, we must
231 * tear down all RPIs and VPIs on link down if NPIV
234 if (vp
->rev
.feaLevelHigh
< 9)
235 phba
->sli3_options
|= LPFC_SLI3_VPORT_TEARDOWN
;
237 if (lpfc_is_LC_HBA(phba
->pcidev
->device
))
238 memcpy(phba
->RandomData
, (char *)&mb
->un
.varWords
[24],
239 sizeof (phba
->RandomData
));
241 /* Get adapter VPD information */
242 lpfc_vpd_data
= kmalloc(DMP_VPD_SIZE
, GFP_KERNEL
);
246 lpfc_dump_mem(phba
, pmb
, offset
, DMP_REGION_VPD
);
247 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
249 if (rc
!= MBX_SUCCESS
) {
250 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
251 "0441 VPD not present on adapter, "
252 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
253 mb
->mbxCommand
, mb
->mbxStatus
);
254 mb
->un
.varDmp
.word_cnt
= 0;
256 /* dump mem may return a zero when finished or we got a
257 * mailbox error, either way we are done.
259 if (mb
->un
.varDmp
.word_cnt
== 0)
262 if (mb
->un
.varDmp
.word_cnt
> DMP_VPD_SIZE
- offset
)
263 mb
->un
.varDmp
.word_cnt
= DMP_VPD_SIZE
- offset
;
264 lpfc_sli_pcimem_bcopy(((uint8_t *)mb
) + DMP_RSP_OFFSET
,
265 lpfc_vpd_data
+ offset
,
266 mb
->un
.varDmp
.word_cnt
);
267 offset
+= mb
->un
.varDmp
.word_cnt
;
268 } while (mb
->un
.varDmp
.word_cnt
&& offset
< DMP_VPD_SIZE
);
270 lpfc_parse_vpd(phba
, lpfc_vpd_data
, offset
);
272 kfree(lpfc_vpd_data
);
274 mempool_free(pmb
, phba
->mbox_mem_pool
);
279 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
280 * @phba: pointer to lpfc hba data structure.
281 * @pmboxq: pointer to the driver internal queue element for mailbox command.
283 * This is the completion handler for driver's configuring asynchronous event
284 * mailbox command to the device. If the mailbox command returns successfully,
285 * it will set internal async event support flag to 1; otherwise, it will
286 * set internal async event support flag to 0.
289 lpfc_config_async_cmpl(struct lpfc_hba
* phba
, LPFC_MBOXQ_t
* pmboxq
)
291 if (pmboxq
->u
.mb
.mbxStatus
== MBX_SUCCESS
)
292 phba
->temp_sensor_support
= 1;
294 phba
->temp_sensor_support
= 0;
295 mempool_free(pmboxq
, phba
->mbox_mem_pool
);
300 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
301 * @phba: pointer to lpfc hba data structure.
302 * @pmboxq: pointer to the driver internal queue element for mailbox command.
304 * This is the completion handler for dump mailbox command for getting
305 * wake up parameters. When this command complete, the response contain
306 * Option rom version of the HBA. This function translate the version number
307 * into a human readable string and store it in OptionROMVersion.
310 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmboxq
)
313 uint32_t prog_id_word
;
315 /* character array used for decoding dist type. */
316 char dist_char
[] = "nabx";
318 if (pmboxq
->u
.mb
.mbxStatus
!= MBX_SUCCESS
) {
319 mempool_free(pmboxq
, phba
->mbox_mem_pool
);
323 prg
= (struct prog_id
*) &prog_id_word
;
325 /* word 7 contain option rom version */
326 prog_id_word
= pmboxq
->u
.mb
.un
.varWords
[7];
328 /* Decode the Option rom version word to a readable string */
329 dist
= dist_char
[prg
->dist
];
331 if ((prg
->dist
== 3) && (prg
->num
== 0))
332 snprintf(phba
->OptionROMVersion
, 32, "%d.%d%d",
333 prg
->ver
, prg
->rev
, prg
->lev
);
335 snprintf(phba
->OptionROMVersion
, 32, "%d.%d%d%c%d",
336 prg
->ver
, prg
->rev
, prg
->lev
,
338 mempool_free(pmboxq
, phba
->mbox_mem_pool
);
343 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
344 * @vport: pointer to lpfc vport data structure.
351 lpfc_update_vport_wwn(struct lpfc_vport
*vport
)
353 struct lpfc_hba
*phba
= vport
->phba
;
356 * If the name is empty or there exists a soft name
357 * then copy the service params name, otherwise use the fc name
359 if (vport
->fc_nodename
.u
.wwn
[0] == 0)
360 memcpy(&vport
->fc_nodename
, &vport
->fc_sparam
.nodeName
,
361 sizeof(struct lpfc_name
));
363 memcpy(&vport
->fc_sparam
.nodeName
, &vport
->fc_nodename
,
364 sizeof(struct lpfc_name
));
367 * If the port name has changed, then set the Param changes flag
370 if (vport
->fc_portname
.u
.wwn
[0] != 0 &&
371 memcmp(&vport
->fc_portname
, &vport
->fc_sparam
.portName
,
372 sizeof(struct lpfc_name
))) {
373 vport
->vport_flag
|= FAWWPN_PARAM_CHG
;
375 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
376 vport
->port_type
== LPFC_PHYSICAL_PORT
&&
377 phba
->sli4_hba
.fawwpn_flag
& LPFC_FAWWPN_FABRIC
) {
378 if (!(phba
->sli4_hba
.fawwpn_flag
& LPFC_FAWWPN_CONFIG
))
379 phba
->sli4_hba
.fawwpn_flag
&=
381 lpfc_printf_log(phba
, KERN_INFO
,
382 LOG_SLI
| LOG_DISCOVERY
| LOG_ELS
,
383 "2701 FA-PWWN change WWPN from %llx to "
384 "%llx: vflag x%x fawwpn_flag x%x\n",
385 wwn_to_u64(vport
->fc_portname
.u
.wwn
),
387 (vport
->fc_sparam
.portName
.u
.wwn
),
389 phba
->sli4_hba
.fawwpn_flag
);
390 memcpy(&vport
->fc_portname
, &vport
->fc_sparam
.portName
,
391 sizeof(struct lpfc_name
));
395 if (vport
->fc_portname
.u
.wwn
[0] == 0)
396 memcpy(&vport
->fc_portname
, &vport
->fc_sparam
.portName
,
397 sizeof(struct lpfc_name
));
399 memcpy(&vport
->fc_sparam
.portName
, &vport
->fc_portname
,
400 sizeof(struct lpfc_name
));
404 * lpfc_config_port_post - Perform lpfc initialization after config port
405 * @phba: pointer to lpfc hba data structure.
407 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
408 * command call. It performs all internal resource and state setups on the
409 * port: post IOCB buffers, enable appropriate host interrupt attentions,
410 * ELS ring timers, etc.
414 * Any other value - error.
417 lpfc_config_port_post(struct lpfc_hba
*phba
)
419 struct lpfc_vport
*vport
= phba
->pport
;
420 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
423 struct lpfc_dmabuf
*mp
;
424 struct lpfc_sli
*psli
= &phba
->sli
;
425 uint32_t status
, timeout
;
429 spin_lock_irq(&phba
->hbalock
);
431 * If the Config port completed correctly the HBA is not
432 * over heated any more.
434 if (phba
->over_temp_state
== HBA_OVER_TEMP
)
435 phba
->over_temp_state
= HBA_NORMAL_TEMP
;
436 spin_unlock_irq(&phba
->hbalock
);
438 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
440 phba
->link_state
= LPFC_HBA_ERROR
;
445 /* Get login parameters for NID. */
446 rc
= lpfc_read_sparam(phba
, pmb
, 0);
448 mempool_free(pmb
, phba
->mbox_mem_pool
);
453 if (lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
) != MBX_SUCCESS
) {
454 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
455 "0448 Adapter failed init, mbxCmd x%x "
456 "READ_SPARM mbxStatus x%x\n",
457 mb
->mbxCommand
, mb
->mbxStatus
);
458 phba
->link_state
= LPFC_HBA_ERROR
;
459 lpfc_mbox_rsrc_cleanup(phba
, pmb
, MBOX_THD_UNLOCKED
);
465 /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
466 * longer needed. Prevent unintended ctx_buf access as the mbox is
469 memcpy(&vport
->fc_sparam
, mp
->virt
, sizeof (struct serv_parm
));
470 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
473 lpfc_update_vport_wwn(vport
);
475 /* Update the fc_host data structures with new wwn. */
476 fc_host_node_name(shost
) = wwn_to_u64(vport
->fc_nodename
.u
.wwn
);
477 fc_host_port_name(shost
) = wwn_to_u64(vport
->fc_portname
.u
.wwn
);
478 fc_host_max_npiv_vports(shost
) = phba
->max_vpi
;
480 /* If no serial number in VPD data, use low 6 bytes of WWNN */
481 /* This should be consolidated into parse_vpd ? - mr */
482 if (phba
->SerialNumber
[0] == 0) {
485 outptr
= &vport
->fc_nodename
.u
.s
.IEEE
[0];
486 for (i
= 0; i
< 12; i
++) {
488 j
= ((status
& 0xf0) >> 4);
490 phba
->SerialNumber
[i
] =
491 (char)((uint8_t) 0x30 + (uint8_t) j
);
493 phba
->SerialNumber
[i
] =
494 (char)((uint8_t) 0x61 + (uint8_t) (j
- 10));
498 phba
->SerialNumber
[i
] =
499 (char)((uint8_t) 0x30 + (uint8_t) j
);
501 phba
->SerialNumber
[i
] =
502 (char)((uint8_t) 0x61 + (uint8_t) (j
- 10));
506 lpfc_read_config(phba
, pmb
);
508 if (lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
) != MBX_SUCCESS
) {
509 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
510 "0453 Adapter failed to init, mbxCmd x%x "
511 "READ_CONFIG, mbxStatus x%x\n",
512 mb
->mbxCommand
, mb
->mbxStatus
);
513 phba
->link_state
= LPFC_HBA_ERROR
;
514 mempool_free( pmb
, phba
->mbox_mem_pool
);
518 /* Check if the port is disabled */
519 lpfc_sli_read_link_ste(phba
);
521 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
522 if (phba
->cfg_hba_queue_depth
> mb
->un
.varRdConfig
.max_xri
) {
523 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
524 "3359 HBA queue depth changed from %d to %d\n",
525 phba
->cfg_hba_queue_depth
,
526 mb
->un
.varRdConfig
.max_xri
);
527 phba
->cfg_hba_queue_depth
= mb
->un
.varRdConfig
.max_xri
;
530 phba
->lmt
= mb
->un
.varRdConfig
.lmt
;
532 /* Get the default values for Model Name and Description */
533 lpfc_get_hba_model_desc(phba
, phba
->ModelName
, phba
->ModelDesc
);
535 phba
->link_state
= LPFC_LINK_DOWN
;
537 /* Only process IOCBs on ELS ring till hba_state is READY */
538 if (psli
->sli3_ring
[LPFC_EXTRA_RING
].sli
.sli3
.cmdringaddr
)
539 psli
->sli3_ring
[LPFC_EXTRA_RING
].flag
|= LPFC_STOP_IOCB_EVENT
;
540 if (psli
->sli3_ring
[LPFC_FCP_RING
].sli
.sli3
.cmdringaddr
)
541 psli
->sli3_ring
[LPFC_FCP_RING
].flag
|= LPFC_STOP_IOCB_EVENT
;
543 /* Post receive buffers for desired rings */
544 if (phba
->sli_rev
!= 3)
545 lpfc_post_rcv_buf(phba
);
548 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
550 if (phba
->intr_type
== MSIX
) {
551 rc
= lpfc_config_msi(phba
, pmb
);
553 mempool_free(pmb
, phba
->mbox_mem_pool
);
556 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
557 if (rc
!= MBX_SUCCESS
) {
558 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
559 "0352 Config MSI mailbox command "
560 "failed, mbxCmd x%x, mbxStatus x%x\n",
561 pmb
->u
.mb
.mbxCommand
,
562 pmb
->u
.mb
.mbxStatus
);
563 mempool_free(pmb
, phba
->mbox_mem_pool
);
568 spin_lock_irq(&phba
->hbalock
);
569 /* Initialize ERATT handling flag */
570 clear_bit(HBA_ERATT_HANDLED
, &phba
->hba_flag
);
572 /* Enable appropriate host interrupts */
573 if (lpfc_readl(phba
->HCregaddr
, &status
)) {
574 spin_unlock_irq(&phba
->hbalock
);
577 status
|= HC_MBINT_ENA
| HC_ERINT_ENA
| HC_LAINT_ENA
;
578 if (psli
->num_rings
> 0)
579 status
|= HC_R0INT_ENA
;
580 if (psli
->num_rings
> 1)
581 status
|= HC_R1INT_ENA
;
582 if (psli
->num_rings
> 2)
583 status
|= HC_R2INT_ENA
;
584 if (psli
->num_rings
> 3)
585 status
|= HC_R3INT_ENA
;
587 if ((phba
->cfg_poll
& ENABLE_FCP_RING_POLLING
) &&
588 (phba
->cfg_poll
& DISABLE_FCP_RING_INT
))
589 status
&= ~(HC_R0INT_ENA
);
591 writel(status
, phba
->HCregaddr
);
592 readl(phba
->HCregaddr
); /* flush */
593 spin_unlock_irq(&phba
->hbalock
);
595 /* Set up ring-0 (ELS) timer */
596 timeout
= phba
->fc_ratov
* 2;
597 mod_timer(&vport
->els_tmofunc
,
598 jiffies
+ msecs_to_jiffies(1000 * timeout
));
599 /* Set up heart beat (HB) timer */
600 mod_timer(&phba
->hb_tmofunc
,
601 jiffies
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL
));
602 clear_bit(HBA_HBEAT_INP
, &phba
->hba_flag
);
603 clear_bit(HBA_HBEAT_TMO
, &phba
->hba_flag
);
604 phba
->last_completion_time
= jiffies
;
605 /* Set up error attention (ERATT) polling timer */
606 mod_timer(&phba
->eratt_poll
,
607 jiffies
+ msecs_to_jiffies(1000 * phba
->eratt_poll_interval
));
609 if (test_bit(LINK_DISABLED
, &phba
->hba_flag
)) {
610 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
611 "2598 Adapter Link is disabled.\n");
612 lpfc_down_link(phba
, pmb
);
613 pmb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
614 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
615 if ((rc
!= MBX_SUCCESS
) && (rc
!= MBX_BUSY
)) {
616 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
617 "2599 Adapter failed to issue DOWN_LINK"
618 " mbox command rc 0x%x\n", rc
);
620 mempool_free(pmb
, phba
->mbox_mem_pool
);
623 } else if (phba
->cfg_suppress_link_up
== LPFC_INITIALIZE_LINK
) {
624 mempool_free(pmb
, phba
->mbox_mem_pool
);
625 rc
= phba
->lpfc_hba_init_link(phba
, MBX_NOWAIT
);
629 /* MBOX buffer will be freed in mbox compl */
630 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
632 phba
->link_state
= LPFC_HBA_ERROR
;
636 lpfc_config_async(phba
, pmb
, LPFC_ELS_RING
);
637 pmb
->mbox_cmpl
= lpfc_config_async_cmpl
;
638 pmb
->vport
= phba
->pport
;
639 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
641 if ((rc
!= MBX_BUSY
) && (rc
!= MBX_SUCCESS
)) {
642 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
643 "0456 Adapter failed to issue "
644 "ASYNCEVT_ENABLE mbox status x%x\n",
646 mempool_free(pmb
, phba
->mbox_mem_pool
);
649 /* Get Option rom version */
650 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
652 phba
->link_state
= LPFC_HBA_ERROR
;
656 lpfc_dump_wakeup_param(phba
, pmb
);
657 pmb
->mbox_cmpl
= lpfc_dump_wakeup_param_cmpl
;
658 pmb
->vport
= phba
->pport
;
659 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
661 if ((rc
!= MBX_BUSY
) && (rc
!= MBX_SUCCESS
)) {
662 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
663 "0435 Adapter failed "
664 "to get Option ROM version status x%x\n", rc
);
665 mempool_free(pmb
, phba
->mbox_mem_pool
);
672 * lpfc_sli4_refresh_params - update driver copy of params.
673 * @phba: Pointer to HBA context object.
675 * This is called to refresh driver copy of dynamic fields from the
676 * common_get_sli4_parameters descriptor.
679 lpfc_sli4_refresh_params(struct lpfc_hba
*phba
)
682 struct lpfc_mqe
*mqe
;
683 struct lpfc_sli4_parameters
*mbx_sli4_parameters
;
686 mboxq
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
691 /* Read the port's SLI4 Config Parameters */
692 length
= (sizeof(struct lpfc_mbx_get_sli4_parameters
) -
693 sizeof(struct lpfc_sli4_cfg_mhdr
));
694 lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_COMMON
,
695 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS
,
696 length
, LPFC_SLI4_MBX_EMBED
);
698 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
700 mempool_free(mboxq
, phba
->mbox_mem_pool
);
703 mbx_sli4_parameters
= &mqe
->un
.get_sli4_parameters
.sli4_parameters
;
704 phba
->sli4_hba
.pc_sli4_params
.mi_cap
=
705 bf_get(cfg_mi_ver
, mbx_sli4_parameters
);
707 /* Are we forcing MI off via module parameter? */
708 if (phba
->cfg_enable_mi
)
709 phba
->sli4_hba
.pc_sli4_params
.mi_ver
=
710 bf_get(cfg_mi_ver
, mbx_sli4_parameters
);
712 phba
->sli4_hba
.pc_sli4_params
.mi_ver
= 0;
714 phba
->sli4_hba
.pc_sli4_params
.cmf
=
715 bf_get(cfg_cmf
, mbx_sli4_parameters
);
716 phba
->sli4_hba
.pc_sli4_params
.pls
=
717 bf_get(cfg_pvl
, mbx_sli4_parameters
);
719 mempool_free(mboxq
, phba
->mbox_mem_pool
);
724 * lpfc_hba_init_link - Initialize the FC link
725 * @phba: pointer to lpfc hba data structure.
726 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
728 * This routine will issue the INIT_LINK mailbox command call.
729 * It is available to other drivers through the lpfc_hba data
730 * structure for use as a delayed link up mechanism with the
731 * module parameter lpfc_suppress_link_up.
735 * Any other value - error
738 lpfc_hba_init_link(struct lpfc_hba
*phba
, uint32_t flag
)
740 return lpfc_hba_init_link_fc_topology(phba
, phba
->cfg_topology
, flag
);
744 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
745 * @phba: pointer to lpfc hba data structure.
746 * @fc_topology: desired fc topology.
747 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
749 * This routine will issue the INIT_LINK mailbox command call.
750 * It is available to other drivers through the lpfc_hba data
751 * structure for use as a delayed link up mechanism with the
752 * module parameter lpfc_suppress_link_up.
756 * Any other value - error
759 lpfc_hba_init_link_fc_topology(struct lpfc_hba
*phba
, uint32_t fc_topology
,
762 struct lpfc_vport
*vport
= phba
->pport
;
767 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
769 phba
->link_state
= LPFC_HBA_ERROR
;
775 if ((phba
->cfg_link_speed
> LPFC_USER_LINK_SPEED_MAX
) ||
776 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_1G
) &&
777 !(phba
->lmt
& LMT_1Gb
)) ||
778 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_2G
) &&
779 !(phba
->lmt
& LMT_2Gb
)) ||
780 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_4G
) &&
781 !(phba
->lmt
& LMT_4Gb
)) ||
782 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_8G
) &&
783 !(phba
->lmt
& LMT_8Gb
)) ||
784 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_10G
) &&
785 !(phba
->lmt
& LMT_10Gb
)) ||
786 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_16G
) &&
787 !(phba
->lmt
& LMT_16Gb
)) ||
788 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_32G
) &&
789 !(phba
->lmt
& LMT_32Gb
)) ||
790 ((phba
->cfg_link_speed
== LPFC_USER_LINK_SPEED_64G
) &&
791 !(phba
->lmt
& LMT_64Gb
))) {
792 /* Reset link speed to auto */
793 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
794 "1302 Invalid speed for this board:%d "
795 "Reset link speed to auto.\n",
796 phba
->cfg_link_speed
);
797 phba
->cfg_link_speed
= LPFC_USER_LINK_SPEED_AUTO
;
799 lpfc_init_link(phba
, pmb
, fc_topology
, phba
->cfg_link_speed
);
800 pmb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
801 if (phba
->sli_rev
< LPFC_SLI_REV4
)
802 lpfc_set_loopback_flag(phba
);
803 rc
= lpfc_sli_issue_mbox(phba
, pmb
, flag
);
804 if ((rc
!= MBX_BUSY
) && (rc
!= MBX_SUCCESS
)) {
805 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
806 "0498 Adapter failed to init, mbxCmd x%x "
807 "INIT_LINK, mbxStatus x%x\n",
808 mb
->mbxCommand
, mb
->mbxStatus
);
809 if (phba
->sli_rev
<= LPFC_SLI_REV3
) {
810 /* Clear all interrupt enable conditions */
811 writel(0, phba
->HCregaddr
);
812 readl(phba
->HCregaddr
); /* flush */
813 /* Clear all pending interrupts */
814 writel(0xffffffff, phba
->HAregaddr
);
815 readl(phba
->HAregaddr
); /* flush */
817 phba
->link_state
= LPFC_HBA_ERROR
;
818 if (rc
!= MBX_BUSY
|| flag
== MBX_POLL
)
819 mempool_free(pmb
, phba
->mbox_mem_pool
);
822 phba
->cfg_suppress_link_up
= LPFC_INITIALIZE_LINK
;
823 if (flag
== MBX_POLL
)
824 mempool_free(pmb
, phba
->mbox_mem_pool
);
830 * lpfc_hba_down_link - this routine downs the FC link
831 * @phba: pointer to lpfc hba data structure.
832 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
834 * This routine will issue the DOWN_LINK mailbox command call.
835 * It is available to other drivers through the lpfc_hba data
836 * structure for use to stop the link.
840 * Any other value - error
843 lpfc_hba_down_link(struct lpfc_hba
*phba
, uint32_t flag
)
848 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
850 phba
->link_state
= LPFC_HBA_ERROR
;
854 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
855 "0491 Adapter Link is disabled.\n");
856 lpfc_down_link(phba
, pmb
);
857 pmb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
858 rc
= lpfc_sli_issue_mbox(phba
, pmb
, flag
);
859 if ((rc
!= MBX_SUCCESS
) && (rc
!= MBX_BUSY
)) {
860 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
861 "2522 Adapter failed to issue DOWN_LINK"
862 " mbox command rc 0x%x\n", rc
);
864 mempool_free(pmb
, phba
->mbox_mem_pool
);
867 if (flag
== MBX_POLL
)
868 mempool_free(pmb
, phba
->mbox_mem_pool
);
874 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
875 * @phba: pointer to lpfc HBA data structure.
877 * This routine will do LPFC uninitialization before the HBA is reset when
878 * bringing down the SLI Layer.
882 * Any other value - error.
885 lpfc_hba_down_prep(struct lpfc_hba
*phba
)
887 struct lpfc_vport
**vports
;
890 if (phba
->sli_rev
<= LPFC_SLI_REV3
) {
891 /* Disable interrupts */
892 writel(0, phba
->HCregaddr
);
893 readl(phba
->HCregaddr
); /* flush */
896 if (test_bit(FC_UNLOADING
, &phba
->pport
->load_flag
))
897 lpfc_cleanup_discovery_resources(phba
->pport
);
899 vports
= lpfc_create_vport_work_array(phba
);
901 for (i
= 0; i
<= phba
->max_vports
&&
902 vports
[i
] != NULL
; i
++)
903 lpfc_cleanup_discovery_resources(vports
[i
]);
904 lpfc_destroy_vport_work_array(phba
, vports
);
910 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
911 * rspiocb which got deferred
913 * @phba: pointer to lpfc HBA data structure.
915 * This routine will cleanup completed slow path events after HBA is reset
916 * when bringing down the SLI Layer.
923 lpfc_sli4_free_sp_events(struct lpfc_hba
*phba
)
925 struct lpfc_iocbq
*rspiocbq
;
926 struct hbq_dmabuf
*dmabuf
;
927 struct lpfc_cq_event
*cq_event
;
929 clear_bit(HBA_SP_QUEUE_EVT
, &phba
->hba_flag
);
931 while (!list_empty(&phba
->sli4_hba
.sp_queue_event
)) {
932 /* Get the response iocb from the head of work queue */
933 spin_lock_irq(&phba
->hbalock
);
934 list_remove_head(&phba
->sli4_hba
.sp_queue_event
,
935 cq_event
, struct lpfc_cq_event
, list
);
936 spin_unlock_irq(&phba
->hbalock
);
938 switch (bf_get(lpfc_wcqe_c_code
, &cq_event
->cqe
.wcqe_cmpl
)) {
939 case CQE_CODE_COMPL_WQE
:
940 rspiocbq
= container_of(cq_event
, struct lpfc_iocbq
,
942 lpfc_sli_release_iocbq(phba
, rspiocbq
);
944 case CQE_CODE_RECEIVE
:
945 case CQE_CODE_RECEIVE_V1
:
946 dmabuf
= container_of(cq_event
, struct hbq_dmabuf
,
948 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
954 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
955 * @phba: pointer to lpfc HBA data structure.
957 * This routine will cleanup posted ELS buffers after the HBA is reset
958 * when bringing down the SLI Layer.
965 lpfc_hba_free_post_buf(struct lpfc_hba
*phba
)
967 struct lpfc_sli
*psli
= &phba
->sli
;
968 struct lpfc_sli_ring
*pring
;
969 struct lpfc_dmabuf
*mp
, *next_mp
;
973 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
)
974 lpfc_sli_hbqbuf_free_all(phba
);
976 /* Cleanup preposted buffers on the ELS ring */
977 pring
= &psli
->sli3_ring
[LPFC_ELS_RING
];
978 spin_lock_irq(&phba
->hbalock
);
979 list_splice_init(&pring
->postbufq
, &buflist
);
980 spin_unlock_irq(&phba
->hbalock
);
983 list_for_each_entry_safe(mp
, next_mp
, &buflist
, list
) {
986 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
990 spin_lock_irq(&phba
->hbalock
);
991 pring
->postbufq_cnt
-= count
;
992 spin_unlock_irq(&phba
->hbalock
);
997 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
998 * @phba: pointer to lpfc HBA data structure.
1000 * This routine will cleanup the txcmplq after the HBA is reset when bringing
1001 * down the SLI Layer.
1007 lpfc_hba_clean_txcmplq(struct lpfc_hba
*phba
)
1009 struct lpfc_sli
*psli
= &phba
->sli
;
1010 struct lpfc_queue
*qp
= NULL
;
1011 struct lpfc_sli_ring
*pring
;
1012 LIST_HEAD(completions
);
1014 struct lpfc_iocbq
*piocb
, *next_iocb
;
1016 if (phba
->sli_rev
!= LPFC_SLI_REV4
) {
1017 for (i
= 0; i
< psli
->num_rings
; i
++) {
1018 pring
= &psli
->sli3_ring
[i
];
1019 spin_lock_irq(&phba
->hbalock
);
1020 /* At this point in time the HBA is either reset or DOA
1021 * Nothing should be on txcmplq as it will
1024 list_splice_init(&pring
->txcmplq
, &completions
);
1025 pring
->txcmplq_cnt
= 0;
1026 spin_unlock_irq(&phba
->hbalock
);
1028 lpfc_sli_abort_iocb_ring(phba
, pring
);
1030 /* Cancel all the IOCBs from the completions list */
1031 lpfc_sli_cancel_iocbs(phba
, &completions
,
1032 IOSTAT_LOCAL_REJECT
, IOERR_SLI_ABORTED
);
1035 list_for_each_entry(qp
, &phba
->sli4_hba
.lpfc_wq_list
, wq_list
) {
1039 spin_lock_irq(&pring
->ring_lock
);
1040 list_for_each_entry_safe(piocb
, next_iocb
,
1041 &pring
->txcmplq
, list
)
1042 piocb
->cmd_flag
&= ~LPFC_IO_ON_TXCMPLQ
;
1043 list_splice_init(&pring
->txcmplq
, &completions
);
1044 pring
->txcmplq_cnt
= 0;
1045 spin_unlock_irq(&pring
->ring_lock
);
1046 lpfc_sli_abort_iocb_ring(phba
, pring
);
1048 /* Cancel all the IOCBs from the completions list */
1049 lpfc_sli_cancel_iocbs(phba
, &completions
,
1050 IOSTAT_LOCAL_REJECT
, IOERR_SLI_ABORTED
);
1054 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
1055 * @phba: pointer to lpfc HBA data structure.
1057 * This routine will do uninitialization after the HBA is reset when bring
1058 * down the SLI Layer.
1062 * Any other value - error.
1065 lpfc_hba_down_post_s3(struct lpfc_hba
*phba
)
1067 lpfc_hba_free_post_buf(phba
);
1068 lpfc_hba_clean_txcmplq(phba
);
1073 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
1074 * @phba: pointer to lpfc HBA data structure.
1076 * This routine will do uninitialization after the HBA is reset when bring
1077 * down the SLI Layer.
1081 * Any other value - error.
1084 lpfc_hba_down_post_s4(struct lpfc_hba
*phba
)
1086 struct lpfc_io_buf
*psb
, *psb_next
;
1087 struct lpfc_async_xchg_ctx
*ctxp
, *ctxp_next
;
1088 struct lpfc_sli4_hdw_queue
*qp
;
1090 LIST_HEAD(nvme_aborts
);
1091 LIST_HEAD(nvmet_aborts
);
1092 struct lpfc_sglq
*sglq_entry
= NULL
;
1096 lpfc_sli_hbqbuf_free_all(phba
);
1097 lpfc_hba_clean_txcmplq(phba
);
1099 /* At this point in time the HBA is either reset or DOA. Either
1100 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
1101 * on the lpfc_els_sgl_list so that it can either be freed if the
1102 * driver is unloading or reposted if the driver is restarting
1106 /* sgl_list_lock required because worker thread uses this
1109 spin_lock_irq(&phba
->sli4_hba
.sgl_list_lock
);
1110 list_for_each_entry(sglq_entry
,
1111 &phba
->sli4_hba
.lpfc_abts_els_sgl_list
, list
)
1112 sglq_entry
->state
= SGL_FREED
;
1114 list_splice_init(&phba
->sli4_hba
.lpfc_abts_els_sgl_list
,
1115 &phba
->sli4_hba
.lpfc_els_sgl_list
);
1118 spin_unlock_irq(&phba
->sli4_hba
.sgl_list_lock
);
1120 /* abts_xxxx_buf_list_lock required because worker thread uses this
1123 spin_lock_irq(&phba
->hbalock
);
1125 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
1126 qp
= &phba
->sli4_hba
.hdwq
[idx
];
1128 spin_lock(&qp
->abts_io_buf_list_lock
);
1129 list_splice_init(&qp
->lpfc_abts_io_buf_list
,
1132 list_for_each_entry_safe(psb
, psb_next
, &aborts
, list
) {
1134 psb
->status
= IOSTAT_SUCCESS
;
1137 spin_lock(&qp
->io_buf_list_put_lock
);
1138 list_splice_init(&aborts
, &qp
->lpfc_io_buf_list_put
);
1139 qp
->put_io_bufs
+= qp
->abts_scsi_io_bufs
;
1140 qp
->put_io_bufs
+= qp
->abts_nvme_io_bufs
;
1141 qp
->abts_scsi_io_bufs
= 0;
1142 qp
->abts_nvme_io_bufs
= 0;
1143 spin_unlock(&qp
->io_buf_list_put_lock
);
1144 spin_unlock(&qp
->abts_io_buf_list_lock
);
1146 spin_unlock_irq(&phba
->hbalock
);
1148 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
1149 spin_lock_irq(&phba
->sli4_hba
.abts_nvmet_buf_list_lock
);
1150 list_splice_init(&phba
->sli4_hba
.lpfc_abts_nvmet_ctx_list
,
1152 spin_unlock_irq(&phba
->sli4_hba
.abts_nvmet_buf_list_lock
);
1153 list_for_each_entry_safe(ctxp
, ctxp_next
, &nvmet_aborts
, list
) {
1154 ctxp
->flag
&= ~(LPFC_NVME_XBUSY
| LPFC_NVME_ABORT_OP
);
1155 lpfc_nvmet_ctxbuf_post(phba
, ctxp
->ctxbuf
);
1159 lpfc_sli4_free_sp_events(phba
);
1164 * lpfc_hba_down_post - Wrapper func for hba down post routine
1165 * @phba: pointer to lpfc HBA data structure.
1167 * This routine wraps the actual SLI3 or SLI4 routine for performing
1168 * uninitialization after the HBA is reset when bring down the SLI Layer.
1172 * Any other value - error.
1175 lpfc_hba_down_post(struct lpfc_hba
*phba
)
1177 return (*phba
->lpfc_hba_down_post
)(phba
);
1181 * lpfc_hb_timeout - The HBA-timer timeout handler
1182 * @t: timer context used to obtain the pointer to lpfc hba data structure.
1184 * This is the HBA-timer timeout handler registered to the lpfc driver. When
1185 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
1186 * work-port-events bitmap and the worker thread is notified. This timeout
1187 * event will be used by the worker thread to invoke the actual timeout
1188 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
1189 * be performed in the timeout handler and the HBA timeout event bit shall
1190 * be cleared by the worker thread after it has taken the event bitmap out.
1193 lpfc_hb_timeout(struct timer_list
*t
)
1195 struct lpfc_hba
*phba
;
1196 uint32_t tmo_posted
;
1197 unsigned long iflag
;
1199 phba
= from_timer(phba
, t
, hb_tmofunc
);
1201 /* Check for heart beat timeout conditions */
1202 spin_lock_irqsave(&phba
->pport
->work_port_lock
, iflag
);
1203 tmo_posted
= phba
->pport
->work_port_events
& WORKER_HB_TMO
;
1205 phba
->pport
->work_port_events
|= WORKER_HB_TMO
;
1206 spin_unlock_irqrestore(&phba
->pport
->work_port_lock
, iflag
);
1208 /* Tell the worker thread there is work to do */
1210 lpfc_worker_wake_up(phba
);
1215 * lpfc_rrq_timeout - The RRQ-timer timeout handler
1216 * @t: timer context used to obtain the pointer to lpfc hba data structure.
1218 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1219 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
1220 * work-port-events bitmap and the worker thread is notified. This timeout
1221 * event will be used by the worker thread to invoke the actual timeout
1222 * handler routine, lpfc_rrq_handler. Any periodical operations will
1223 * be performed in the timeout handler and the RRQ timeout event bit shall
1224 * be cleared by the worker thread after it has taken the event bitmap out.
1227 lpfc_rrq_timeout(struct timer_list
*t
)
1229 struct lpfc_hba
*phba
;
1231 phba
= from_timer(phba
, t
, rrq_tmr
);
1232 if (test_bit(FC_UNLOADING
, &phba
->pport
->load_flag
)) {
1233 clear_bit(HBA_RRQ_ACTIVE
, &phba
->hba_flag
);
1237 set_bit(HBA_RRQ_ACTIVE
, &phba
->hba_flag
);
1238 lpfc_worker_wake_up(phba
);
1242 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
1243 * @phba: pointer to lpfc hba data structure.
1244 * @pmboxq: pointer to the driver internal queue element for mailbox command.
1246 * This is the callback function to the lpfc heart-beat mailbox command.
1247 * If configured, the lpfc driver issues the heart-beat mailbox command to
1248 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
1249 * heart-beat mailbox command is issued, the driver shall set up heart-beat
1250 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
1251 * heart-beat outstanding state. Once the mailbox command comes back and
1252 * no error conditions detected, the heart-beat mailbox command timer is
1253 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1254 * state is cleared for the next heart-beat. If the timer expired with the
1255 * heart-beat outstanding state set, the driver will put the HBA offline.
1258 lpfc_hb_mbox_cmpl(struct lpfc_hba
* phba
, LPFC_MBOXQ_t
* pmboxq
)
1260 clear_bit(HBA_HBEAT_INP
, &phba
->hba_flag
);
1261 clear_bit(HBA_HBEAT_TMO
, &phba
->hba_flag
);
1263 /* Check and reset heart-beat timer if necessary */
1264 mempool_free(pmboxq
, phba
->mbox_mem_pool
);
1265 if (!test_bit(FC_OFFLINE_MODE
, &phba
->pport
->fc_flag
) &&
1266 !(phba
->link_state
== LPFC_HBA_ERROR
) &&
1267 !test_bit(FC_UNLOADING
, &phba
->pport
->load_flag
))
1268 mod_timer(&phba
->hb_tmofunc
,
1270 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL
));
1275 * lpfc_idle_stat_delay_work - idle_stat tracking
1277 * This routine tracks per-eq idle_stat and determines polling decisions.
1283 lpfc_idle_stat_delay_work(struct work_struct
*work
)
1285 struct lpfc_hba
*phba
= container_of(to_delayed_work(work
),
1287 idle_stat_delay_work
);
1288 struct lpfc_queue
*eq
;
1289 struct lpfc_sli4_hdw_queue
*hdwq
;
1290 struct lpfc_idle_stat
*idle_stat
;
1291 u32 i
, idle_percent
;
1292 u64 wall
, wall_idle
, diff_wall
, diff_idle
, busy_time
;
1294 if (test_bit(FC_UNLOADING
, &phba
->pport
->load_flag
))
1297 if (phba
->link_state
== LPFC_HBA_ERROR
||
1298 test_bit(FC_OFFLINE_MODE
, &phba
->pport
->fc_flag
) ||
1299 phba
->cmf_active_mode
!= LPFC_CFG_OFF
)
1302 for_each_present_cpu(i
) {
1303 hdwq
= &phba
->sli4_hba
.hdwq
[phba
->sli4_hba
.cpu_map
[i
].hdwq
];
1306 /* Skip if we've already handled this eq's primary CPU */
1310 idle_stat
= &phba
->sli4_hba
.idle_stat
[i
];
1312 /* get_cpu_idle_time returns values as running counters. Thus,
1313 * to know the amount for this period, the prior counter values
1314 * need to be subtracted from the current counter values.
1315 * From there, the idle time stat can be calculated as a
1316 * percentage of 100 - the sum of the other consumption times.
1318 wall_idle
= get_cpu_idle_time(i
, &wall
, 1);
1319 diff_idle
= wall_idle
- idle_stat
->prev_idle
;
1320 diff_wall
= wall
- idle_stat
->prev_wall
;
1322 if (diff_wall
<= diff_idle
)
1325 busy_time
= diff_wall
- diff_idle
;
1327 idle_percent
= div64_u64(100 * busy_time
, diff_wall
);
1328 idle_percent
= 100 - idle_percent
;
1330 if (idle_percent
< 15)
1331 eq
->poll_mode
= LPFC_QUEUE_WORK
;
1333 eq
->poll_mode
= LPFC_THREADED_IRQ
;
1335 idle_stat
->prev_idle
= wall_idle
;
1336 idle_stat
->prev_wall
= wall
;
1340 schedule_delayed_work(&phba
->idle_stat_delay_work
,
1341 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY
));
1345 lpfc_hb_eq_delay_work(struct work_struct
*work
)
1347 struct lpfc_hba
*phba
= container_of(to_delayed_work(work
),
1348 struct lpfc_hba
, eq_delay_work
);
1349 struct lpfc_eq_intr_info
*eqi
, *eqi_new
;
1350 struct lpfc_queue
*eq
, *eq_next
;
1351 unsigned char *ena_delay
= NULL
;
1355 if (!phba
->cfg_auto_imax
||
1356 test_bit(FC_UNLOADING
, &phba
->pport
->load_flag
))
1359 if (phba
->link_state
== LPFC_HBA_ERROR
||
1360 test_bit(FC_OFFLINE_MODE
, &phba
->pport
->fc_flag
))
1363 ena_delay
= kcalloc(phba
->sli4_hba
.num_possible_cpu
, sizeof(*ena_delay
),
1368 for (i
= 0; i
< phba
->cfg_irq_chann
; i
++) {
1369 /* Get the EQ corresponding to the IRQ vector */
1370 eq
= phba
->sli4_hba
.hba_eq_hdl
[i
].eq
;
1373 if (eq
->q_mode
|| eq
->q_flag
& HBA_EQ_DELAY_CHK
) {
1374 eq
->q_flag
&= ~HBA_EQ_DELAY_CHK
;
1375 ena_delay
[eq
->last_cpu
] = 1;
1379 for_each_present_cpu(i
) {
1380 eqi
= per_cpu_ptr(phba
->sli4_hba
.eq_info
, i
);
1382 usdelay
= (eqi
->icnt
>> 10) * LPFC_EQ_DELAY_STEP
;
1383 if (usdelay
> LPFC_MAX_AUTO_EQ_DELAY
)
1384 usdelay
= LPFC_MAX_AUTO_EQ_DELAY
;
1391 list_for_each_entry_safe(eq
, eq_next
, &eqi
->list
, cpu_list
) {
1392 if (unlikely(eq
->last_cpu
!= i
)) {
1393 eqi_new
= per_cpu_ptr(phba
->sli4_hba
.eq_info
,
1395 list_move_tail(&eq
->cpu_list
, &eqi_new
->list
);
1398 if (usdelay
!= eq
->q_mode
)
1399 lpfc_modify_hba_eq_delay(phba
, eq
->hdwq
, 1,
1407 queue_delayed_work(phba
->wq
, &phba
->eq_delay_work
,
1408 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS
));
1412 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1413 * @phba: pointer to lpfc hba data structure.
1415 * For each heartbeat, this routine does some heuristic methods to adjust
1416 * XRI distribution. The goal is to fully utilize free XRIs.
1418 static void lpfc_hb_mxp_handler(struct lpfc_hba
*phba
)
1423 hwq_count
= phba
->cfg_hdw_queue
;
1424 for (i
= 0; i
< hwq_count
; i
++) {
1425 /* Adjust XRIs in private pool */
1426 lpfc_adjust_pvt_pool_count(phba
, i
);
1428 /* Adjust high watermark */
1429 lpfc_adjust_high_watermark(phba
, i
);
1431 #ifdef LPFC_MXP_STAT
1432 /* Snapshot pbl, pvt and busy count */
1433 lpfc_snapshot_mxp(phba
, i
);
1439 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
1440 * @phba: pointer to lpfc hba data structure.
1442 * If a HB mbox is not already in progrees, this routine will allocate
1443 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
1444 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
1447 lpfc_issue_hb_mbox(struct lpfc_hba
*phba
)
1449 LPFC_MBOXQ_t
*pmboxq
;
1452 /* Is a Heartbeat mbox already in progress */
1453 if (test_bit(HBA_HBEAT_INP
, &phba
->hba_flag
))
1456 pmboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1460 lpfc_heart_beat(phba
, pmboxq
);
1461 pmboxq
->mbox_cmpl
= lpfc_hb_mbox_cmpl
;
1462 pmboxq
->vport
= phba
->pport
;
1463 retval
= lpfc_sli_issue_mbox(phba
, pmboxq
, MBX_NOWAIT
);
1465 if (retval
!= MBX_BUSY
&& retval
!= MBX_SUCCESS
) {
1466 mempool_free(pmboxq
, phba
->mbox_mem_pool
);
1469 set_bit(HBA_HBEAT_INP
, &phba
->hba_flag
);
1475 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1476 * @phba: pointer to lpfc hba data structure.
1478 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
1479 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
1480 * of the value of lpfc_enable_hba_heartbeat.
1481 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
1482 * try to issue a MBX_HEARTBEAT mbox command.
1485 lpfc_issue_hb_tmo(struct lpfc_hba
*phba
)
1487 if (phba
->cfg_enable_hba_heartbeat
)
1489 set_bit(HBA_HBEAT_TMO
, &phba
->hba_flag
);
1493 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1494 * @phba: pointer to lpfc hba data structure.
1496 * This is the actual HBA-timer timeout handler to be invoked by the worker
1497 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1498 * handler performs any periodic operations needed for the device. If such
1499 * periodic event has already been attended to either in the interrupt handler
1500 * or by processing slow-ring or fast-ring events within the HBA-timer
1501 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1502 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1503 * is configured and there is no heart-beat mailbox command outstanding, a
1504 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1505 * has been a heart-beat mailbox command outstanding, the HBA shall be put
1509 lpfc_hb_timeout_handler(struct lpfc_hba
*phba
)
1511 struct lpfc_vport
**vports
;
1512 struct lpfc_dmabuf
*buf_ptr
;
1515 struct lpfc_sli
*psli
= &phba
->sli
;
1516 LIST_HEAD(completions
);
1518 if (phba
->cfg_xri_rebalancing
) {
1519 /* Multi-XRI pools handler */
1520 lpfc_hb_mxp_handler(phba
);
1523 vports
= lpfc_create_vport_work_array(phba
);
1525 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
1526 lpfc_rcv_seq_check_edtov(vports
[i
]);
1527 lpfc_fdmi_change_check(vports
[i
]);
1529 lpfc_destroy_vport_work_array(phba
, vports
);
1531 if (phba
->link_state
== LPFC_HBA_ERROR
||
1532 test_bit(FC_UNLOADING
, &phba
->pport
->load_flag
) ||
1533 test_bit(FC_OFFLINE_MODE
, &phba
->pport
->fc_flag
))
1536 if (phba
->elsbuf_cnt
&&
1537 (phba
->elsbuf_cnt
== phba
->elsbuf_prev_cnt
)) {
1538 spin_lock_irq(&phba
->hbalock
);
1539 list_splice_init(&phba
->elsbuf
, &completions
);
1540 phba
->elsbuf_cnt
= 0;
1541 phba
->elsbuf_prev_cnt
= 0;
1542 spin_unlock_irq(&phba
->hbalock
);
1544 while (!list_empty(&completions
)) {
1545 list_remove_head(&completions
, buf_ptr
,
1546 struct lpfc_dmabuf
, list
);
1547 lpfc_mbuf_free(phba
, buf_ptr
->virt
, buf_ptr
->phys
);
1551 phba
->elsbuf_prev_cnt
= phba
->elsbuf_cnt
;
1553 /* If there is no heart beat outstanding, issue a heartbeat command */
1554 if (phba
->cfg_enable_hba_heartbeat
) {
1555 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1556 spin_lock_irq(&phba
->pport
->work_port_lock
);
1557 if (time_after(phba
->last_completion_time
+
1558 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL
),
1560 spin_unlock_irq(&phba
->pport
->work_port_lock
);
1561 if (test_bit(HBA_HBEAT_INP
, &phba
->hba_flag
))
1562 tmo
= (1000 * LPFC_HB_MBOX_TIMEOUT
);
1564 tmo
= (1000 * LPFC_HB_MBOX_INTERVAL
);
1567 spin_unlock_irq(&phba
->pport
->work_port_lock
);
1569 /* Check if a MBX_HEARTBEAT is already in progress */
1570 if (test_bit(HBA_HBEAT_INP
, &phba
->hba_flag
)) {
1572 * If heart beat timeout called with HBA_HBEAT_INP set
1573 * we need to give the hb mailbox cmd a chance to
1576 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
1577 "0459 Adapter heartbeat still outstanding: "
1578 "last compl time was %d ms.\n",
1579 jiffies_to_msecs(jiffies
1580 - phba
->last_completion_time
));
1581 tmo
= (1000 * LPFC_HB_MBOX_TIMEOUT
);
1583 if ((!(psli
->sli_flag
& LPFC_SLI_MBOX_ACTIVE
)) &&
1584 (list_empty(&psli
->mboxq
))) {
1586 retval
= lpfc_issue_hb_mbox(phba
);
1588 tmo
= (1000 * LPFC_HB_MBOX_INTERVAL
);
1591 phba
->skipped_hb
= 0;
1592 } else if (time_before_eq(phba
->last_completion_time
,
1593 phba
->skipped_hb
)) {
1594 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
1595 "2857 Last completion time not "
1596 " updated in %d ms\n",
1597 jiffies_to_msecs(jiffies
1598 - phba
->last_completion_time
));
1600 phba
->skipped_hb
= jiffies
;
1602 tmo
= (1000 * LPFC_HB_MBOX_TIMEOUT
);
1606 /* Check to see if we want to force a MBX_HEARTBEAT */
1607 if (test_bit(HBA_HBEAT_TMO
, &phba
->hba_flag
)) {
1608 retval
= lpfc_issue_hb_mbox(phba
);
1610 tmo
= (1000 * LPFC_HB_MBOX_INTERVAL
);
1612 tmo
= (1000 * LPFC_HB_MBOX_TIMEOUT
);
1615 tmo
= (1000 * LPFC_HB_MBOX_INTERVAL
);
1618 mod_timer(&phba
->hb_tmofunc
, jiffies
+ msecs_to_jiffies(tmo
));
1622 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1623 * @phba: pointer to lpfc hba data structure.
1625 * This routine is called to bring the HBA offline when HBA hardware error
1626 * other than Port Error 6 has been detected.
1629 lpfc_offline_eratt(struct lpfc_hba
*phba
)
1631 struct lpfc_sli
*psli
= &phba
->sli
;
1633 spin_lock_irq(&phba
->hbalock
);
1634 psli
->sli_flag
&= ~LPFC_SLI_ACTIVE
;
1635 spin_unlock_irq(&phba
->hbalock
);
1636 lpfc_offline_prep(phba
, LPFC_MBX_NO_WAIT
);
1639 lpfc_reset_barrier(phba
);
1640 spin_lock_irq(&phba
->hbalock
);
1641 lpfc_sli_brdreset(phba
);
1642 spin_unlock_irq(&phba
->hbalock
);
1643 lpfc_hba_down_post(phba
);
1644 lpfc_sli_brdready(phba
, HS_MBRDY
);
1645 lpfc_unblock_mgmt_io(phba
);
1646 phba
->link_state
= LPFC_HBA_ERROR
;
1651 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1652 * @phba: pointer to lpfc hba data structure.
1654 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1655 * other than Port Error 6 has been detected.
1658 lpfc_sli4_offline_eratt(struct lpfc_hba
*phba
)
1660 spin_lock_irq(&phba
->hbalock
);
1661 if (phba
->link_state
== LPFC_HBA_ERROR
&&
1662 test_bit(HBA_PCI_ERR
, &phba
->bit_flags
)) {
1663 spin_unlock_irq(&phba
->hbalock
);
1666 phba
->link_state
= LPFC_HBA_ERROR
;
1667 spin_unlock_irq(&phba
->hbalock
);
1669 lpfc_offline_prep(phba
, LPFC_MBX_NO_WAIT
);
1670 lpfc_sli_flush_io_rings(phba
);
1672 lpfc_hba_down_post(phba
);
1673 lpfc_unblock_mgmt_io(phba
);
1677 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1678 * @phba: pointer to lpfc hba data structure.
1680 * This routine is invoked to handle the deferred HBA hardware error
1681 * conditions. This type of error is indicated by HBA by setting ER1
1682 * and another ER bit in the host status register. The driver will
1683 * wait until the ER1 bit clears before handling the error condition.
1686 lpfc_handle_deferred_eratt(struct lpfc_hba
*phba
)
1688 uint32_t old_host_status
= phba
->work_hs
;
1689 struct lpfc_sli
*psli
= &phba
->sli
;
1691 /* If the pci channel is offline, ignore possible errors,
1692 * since we cannot communicate with the pci card anyway.
1694 if (pci_channel_offline(phba
->pcidev
)) {
1695 clear_bit(DEFER_ERATT
, &phba
->hba_flag
);
1699 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
1700 "0479 Deferred Adapter Hardware Error "
1701 "Data: x%x x%x x%x\n",
1702 phba
->work_hs
, phba
->work_status
[0],
1703 phba
->work_status
[1]);
1705 spin_lock_irq(&phba
->hbalock
);
1706 psli
->sli_flag
&= ~LPFC_SLI_ACTIVE
;
1707 spin_unlock_irq(&phba
->hbalock
);
1711 * Firmware stops when it triggred erratt. That could cause the I/Os
1712 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1713 * SCSI layer retry it after re-establishing link.
1715 lpfc_sli_abort_fcp_rings(phba
);
1718 * There was a firmware error. Take the hba offline and then
1719 * attempt to restart it.
1721 lpfc_offline_prep(phba
, LPFC_MBX_WAIT
);
1724 /* Wait for the ER1 bit to clear.*/
1725 while (phba
->work_hs
& HS_FFER1
) {
1727 if (lpfc_readl(phba
->HSregaddr
, &phba
->work_hs
)) {
1728 phba
->work_hs
= UNPLUG_ERR
;
1731 /* If driver is unloading let the worker thread continue */
1732 if (test_bit(FC_UNLOADING
, &phba
->pport
->load_flag
)) {
1739 * This is to ptrotect against a race condition in which
1740 * first write to the host attention register clear the
1741 * host status register.
1743 if (!phba
->work_hs
&& !test_bit(FC_UNLOADING
, &phba
->pport
->load_flag
))
1744 phba
->work_hs
= old_host_status
& ~HS_FFER1
;
1746 clear_bit(DEFER_ERATT
, &phba
->hba_flag
);
1747 phba
->work_status
[0] = readl(phba
->MBslimaddr
+ 0xa8);
1748 phba
->work_status
[1] = readl(phba
->MBslimaddr
+ 0xac);
1752 lpfc_board_errevt_to_mgmt(struct lpfc_hba
*phba
)
1754 struct lpfc_board_event_header board_event
;
1755 struct Scsi_Host
*shost
;
1757 board_event
.event_type
= FC_REG_BOARD_EVENT
;
1758 board_event
.subcategory
= LPFC_EVENT_PORTINTERR
;
1759 shost
= lpfc_shost_from_vport(phba
->pport
);
1760 fc_host_post_vendor_event(shost
, fc_get_event_number(),
1761 sizeof(board_event
),
1762 (char *) &board_event
,
1767 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1768 * @phba: pointer to lpfc hba data structure.
1770 * This routine is invoked to handle the following HBA hardware error
1772 * 1 - HBA error attention interrupt
1773 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		clear_bit(DEFER_ERATT, &phba->hba_flag);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (test_bit(DEFER_ERATT, &phba->hba_flag))
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(event_data),
					  (char *) &event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
}
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, thus we should clear
	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli_flush_io_rings(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!test_bit(HBA_RECOVERABLE_UE, &phba->hba_flag)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/*Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
						&portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
					    &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			   reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
						 en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3152 Unrecoverable error\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
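
/*
 * Illustrative sketch only, not part of the driver source: the jump-table
 * pointer dereferenced above is assumed to be bound once per adapter,
 * based on the PCI device group, during driver API setup. The setup
 * function name and the exact switch shape below are assumptions for
 * illustration.
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:
 *		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:
 *		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
 *		break;
 *	}
 */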
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. SLI3 only.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	rc = lpfc_mbox_rsrc_prep(phba, pmb);
	if (rc) {
		rc = 2;
		mempool_free(pmb, phba->mbox_mem_pool);
		goto lpfc_handle_latt_err_exit;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);
	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, pmb->ctx_buf);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
}
static void
lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
{
	int i, j;

	while (length > 0) {
		/* Look for Serial Number */
		if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				phba->SerialNumber[j++] = vpd[(*pindex)++];
				if (j == 31)
					break;
			}
			phba->SerialNumber[j] = 0;
			continue;
		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
			phba->vpd_flag |= VPD_MODEL_DESC;
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				phba->ModelDesc[j++] = vpd[(*pindex)++];
				if (j == 255)
					break;
			}
			phba->ModelDesc[j] = 0;
			continue;
		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
			phba->vpd_flag |= VPD_MODEL_NAME;
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				phba->ModelName[j++] = vpd[(*pindex)++];
				if (j == 79)
					break;
			}
			phba->ModelName[j] = 0;
			continue;
		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
			phba->vpd_flag |= VPD_PROGRAM_TYPE;
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				phba->ProgramType[j++] = vpd[(*pindex)++];
				if (j == 255)
					break;
			}
			phba->ProgramType[j] = 0;
			continue;
		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
			phba->vpd_flag |= VPD_PORT;
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				if ((phba->sli_rev == LPFC_SLI_REV4) &&
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_GET)) {
					j++;
					(*pindex)++;
				} else
					phba->Port[j++] = vpd[(*pindex)++];
				if (j == 19)
					break;
			}
			if ((phba->sli_rev != LPFC_SLI_REV4) ||
			    (phba->sli4_hba.pport_name_sta ==
			     LPFC_SLI4_PPNAME_NON))
				phba->Port[j] = 0;
			continue;
		} else {
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			*pindex += i;
			length -= (3 + i);
		}
	}
}
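
/*
 * Illustrative sketch only (not part of the driver): lpfc_fill_vpd() walks
 * keyword records laid out as two ASCII keyword bytes, a one-byte length,
 * then that many data bytes. For example, a Serial Number record carrying
 * "ABC123" would appear in the VPD buffer as (assumed layout for
 * illustration):
 *
 *	uint8_t example_sn[] = { 'S', 'N', 0x06,
 *				 'A', 'B', 'C', '1', '2', '3' };
 *
 * The routine would copy the six data bytes into phba->SerialNumber and
 * advance *pindex past the whole record.
 */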
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;

			lpfc_fill_vpd(phba, vpd, Length, &index);
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}
/**
 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	uint16_t sub_dev_id = phba->pcidev->subsystem_device;
	char *model = "<Unknown>";
	int tbolt = 0;

	switch (sub_dev_id) {
	case PCI_DEVICE_ID_CLRY_161E:
		model = "161E";
		break;
	case PCI_DEVICE_ID_CLRY_162E:
		model = "162E";
		break;
	case PCI_DEVICE_ID_CLRY_164E:
		model = "164E";
		break;
	case PCI_DEVICE_ID_CLRY_161P:
		model = "161P";
		break;
	case PCI_DEVICE_ID_CLRY_162P:
		model = "162P";
		break;
	case PCI_DEVICE_ID_CLRY_164P:
		model = "164P";
		break;
	case PCI_DEVICE_ID_CLRY_321E:
		model = "321E";
		break;
	case PCI_DEVICE_ID_CLRY_322E:
		model = "322E";
		break;
	case PCI_DEVICE_ID_CLRY_324E:
		model = "324E";
		break;
	case PCI_DEVICE_ID_CLRY_321P:
		model = "321P";
		break;
	case PCI_DEVICE_ID_CLRY_322P:
		model = "322P";
		break;
	case PCI_DEVICE_ID_CLRY_324P:
		model = "324P";
		break;
	case PCI_DEVICE_ID_TLFC_2XX2:
		model = "2XX2";
		tbolt = 1;
		break;
	case PCI_DEVICE_ID_TLFC_3162:
		model = "3162";
		tbolt = 1;
		break;
	case PCI_DEVICE_ID_TLFC_3322:
		model = "3322";
		tbolt = 1;
		break;
	default:
		model = "Unknown";
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", model);

	if (descp && descp[0] == '\0')
		snprintf(descp, 255,
			 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
			 (tbolt) ? "ThunderLink FC " : "Celerity FC-",
			 model, phba->Port);
}
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
		lpfc_get_atto_model_desc(phba, mdp, descp);
		return;
	}

	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7_FC:
		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7P_FC:
		m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);

	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}
/**
 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
static int
lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs. SLI3 only.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING],
			      LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}
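
/*
 * Note: the five constants above are the standard SHA-1 initial state
 * (H0..H4), and the S(N, V) macro defined earlier is a 32-bit rotate-left
 * by N bits. Illustrative example only (not part of the driver):
 *
 *	uint32_t v = 0x80000001;
 *	uint32_t r = S(1, v);	// r == 0x00000003
 */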
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to an working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are put back into the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;

	t = 16;
	do {
		HashWorkingPointer[t] =
			S(1, HashWorkingPointer[t - 3] ^
			     HashWorkingPointer[t - 8] ^
			     HashWorkingPointer[t - 14] ^
			     HashWorkingPointer[t - 16]);
	} while (++t <= 79);

	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	/* Clean up VMID resources */
	if (lpfc_is_vmid_enabled(phba))
		lpfc_vmid_vport_cleanup(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_DID == Fabric_Cntl_DID &&
		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Fabric Ports not in UNMAPPED state are cleaned up in the
		 * DEVICE_RM event.
		 */
		if (ndlp->nlp_type & NLP_FABRIC &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);

		if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD)))
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RM);
	}

	/* This is a special case flush to return all
	 * IOs before entering this loop. There are
	 * two points in the code where a flush is
	 * avoided if the FC_UNLOADING flag is set.
	 * one is in the multipool destroy,
	 * (this prevents a crash) and the other is
	 * in the nvme abort handler, ( also prevents
	 * a crash). Both of these exceptions are
	 * cases where the slot is still accessible.
	 * The flush here is only when the pci slot
	 * is offline.
	 */
	if (test_bit(FC_UNLOADING, &vport->load_flag) &&
	    pci_channel_offline(phba->pcidev))
		lpfc_sli_flush_io_rings(vport->phba);

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						 LOG_TRACE_EVENT,
						 "0282 did:x%x ndlp:x%px "
						 "refcnt:%d xflags x%x nflag x%x\n",
						 ndlp->nlp_DID, (void *)ndlp,
						 kref_read(&ndlp->kref),
						 ndlp->fc4_xpt_flags,
						 ndlp->nlp_flag);
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
}
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_cmf_stop - Stop CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link goes down or if CMF mode is turned OFF.
 * It is also called when going offline or unloaded just before the
 * congestion info buffer is unregistered.
 **/
void
lpfc_cmf_stop(struct lpfc_hba *phba)
{
	int cpu;
	struct lpfc_cgn_stat *cgs;

	/* We only do something if CMF is enabled */
	if (!phba->sli4_hba.pc_sli4_params.cmf)
		return;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6221 Stop CMF / Cancel Timer\n");

	/* Cancel the CMF timer */
	hrtimer_cancel(&phba->cmf_stats_timer);
	hrtimer_cancel(&phba->cmf_timer);

	/* Zero CMF counters */
	atomic_set(&phba->cmf_busy, 0);
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		atomic64_set(&cgs->total_bytes, 0);
		atomic64_set(&cgs->rcv_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
		atomic64_set(&cgs->rx_latency, 0);
	}
	atomic_set(&phba->cmf_bw_wait, 0);

	/* Resume any blocked IO - Queue unblock on workqueue */
	queue_work(phba->wq, &phba->unblock_request_work);
}
static inline uint64_t
lpfc_get_max_line_rate(struct lpfc_hba *phba)
{
	uint64_t rate = lpfc_sli_port_speed_get(phba);

	return ((((unsigned long)rate) * 1024 * 1024) / 10);
}
static void
lpfc_cmf_signal_init(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6223 Signal CMF init\n");

	/* Use the new fc_linkspeed to recalculate */
	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
	phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
	phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
					    phba->cmf_interval_rate, 1000);
	phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;

	/* This is a signal to firmware to sync up CMF BW with link speed */
	lpfc_issue_cmf_sync_wqe(phba, 0, 0);
}
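
/*
 * Illustrative arithmetic only, not from the driver source: assuming
 * lpfc_sli_port_speed_get() reports the link speed in megabits per second,
 * lpfc_get_max_line_rate() scales it to an approximate byte rate
 * (rate * 1024 * 1024 / 10), and cmf_link_byte_count above is that byte
 * rate prorated to one CMF interval:
 *
 *	link_byte_count = max_line_rate * cmf_interval_rate(ms) / 1000
 *
 * e.g. a 32,000 Mb/s link gives roughly 3.36e9 bytes/sec, or about
 * 3.36e6 bytes per millisecond of interval.
 */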
/**
 * lpfc_cmf_start - Start CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link comes up or if CMF mode is turned OFF
 * to Monitor or Managed.
 **/
void
lpfc_cmf_start(struct lpfc_hba *phba)
{
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* We only do something if CMF is enabled */
	if (!phba->sli4_hba.pc_sli4_params.cmf ||
	    phba->cmf_active_mode == LPFC_CFG_OFF)
		return;

	/* Reinitialize congestion buffer info */
	lpfc_init_congestion_buf(phba);

	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);

	atomic_set(&phba->cmf_busy, 0);
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		atomic64_set(&cgs->total_bytes, 0);
		atomic64_set(&cgs->rcv_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
		atomic64_set(&cgs->rx_latency, 0);
	}
	phba->cmf_latency.tv_sec = 0;
	phba->cmf_latency.tv_nsec = 0;

	lpfc_cmf_signal_init(phba);

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6222 Start CMF / Timer\n");

	phba->cmf_timer_cnt = 0;
	hrtimer_start(&phba->cmf_timer,
		      ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
	hrtimer_start(&phba->cmf_stats_timer,
		      ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC),
		      HRTIMER_MODE_REL);
	/* Setup for latency check in IO cmpl routines */
	ktime_get_real_ts64(&phba->cmf_latency);

	atomic_set(&phba->cmf_bw_wait, 0);
	atomic_set(&phba->cmf_stop_io, 0);
}
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	cancel_delayed_work_sync(&phba->eq_delay_work);
	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
	}
	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox no wait action.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether they are from sysfs interface or libdfc interface will
 * all be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
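
/*
 * Illustrative sketch only (values assumed for illustration): the wait
 * budget above is expressed in jiffies. With no mailbox active the cap is
 * LPFC_MBOX_TMO seconds; with an active command it is that command's own
 * timeout as reported by lpfc_mbox_tmo_val():
 *
 *	timeout = msecs_to_jiffies(tmo_seconds * 1000) + jiffies;
 *	if (time_after(jiffies, timeout))
 *		give up waiting and log message 2813
 */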
/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fixup the temporary rpi assignments.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i, rpi;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vports[i]->fc_nodes,
					 nlp_listp) {
			rpi = lpfc_sli4_alloc_rpi(phba);
			if (rpi == LPFC_RPI_ALLOC_ERROR) {
				/* TODO print log? */
				continue;
			}
			ndlp->nlp_rpi = rpi;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "0009 Assign RPI x%x to ndlp x%px "
					 "DID:x%06x flg:x%x\n",
					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 * to expedite pool. Mark them as expedite.
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
 * of HWQ 0. Clear the mark.
 **/
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &epd_pool->list, list) {
		list_move_tail(&lpfc_ncmd->list,
			       &qp->lpfc_io_buf_list_put);
		lpfc_ncmd->flags = false;
		qp->put_io_bufs++;
		epd_pool->count--;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private pools per HWQ. Then, XRIs
 * are moved from lpfc_io_buf_list_put to the public pool. High and low
 * watermarks are also calculated.
 **/
void lpfc_create_multixri_pools(struct lpfc_hba *phba)
{
	u32 i, j;
	u32 hwq_count;
	u32 count_per_hwq;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
			phba->sli4_hba.io_xri_cnt);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_create_expedite_pool(phba);

	hwq_count = phba->cfg_hdw_queue;
	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;

	for (i = 0; i < hwq_count; i++) {
		multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);

		if (!multixri_pool) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"1238 Failed to allocate memory for "
					"multixri_pool\n");

			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
				lpfc_destroy_expedite_pool(phba);

			j = 0;
			while (j < i) {
				qp = &phba->sli4_hba.hdwq[j];
				kfree(qp->p_multixri_pool);
				j++;
			}
			phba->cfg_xri_rebalancing = 0;
			return;
		}

		qp = &phba->sli4_hba.hdwq[i];
		qp->p_multixri_pool = multixri_pool;

		multixri_pool->xri_limit = count_per_hwq;
		multixri_pool->rrb_next_hwqid = i;

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock_init(&pbl_pool->lock);
		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
		spin_lock(&pbl_pool->lock);
		INIT_LIST_HEAD(&pbl_pool->list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put, list) {
			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
			qp->put_io_bufs--;
			pbl_pool->count++;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
				pbl_pool->count, i);
		spin_unlock(&pbl_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
		pvt_pool->low_watermark = XRI_BATCH;
		spin_lock_init(&pvt_pool->lock);
		spin_lock_irqsave(&pvt_pool->lock, iflag);
		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
	}
}
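
/*
 * Illustrative arithmetic only (numbers assumed): with io_xri_cnt = 2048
 * and cfg_hdw_queue = 16, each HWQ gets xri_limit = 2048 / 16 = 128 XRIs.
 * Its private pool then runs between low_watermark = XRI_BATCH and
 * high_watermark = 128 / 2 = 64, with the remainder parked in the shared
 * pbl_pool for rebalancing across hardware queues.
 */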
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
 **/
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_destroy_expedite_pool(phba);

	if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
		lpfc_sli_flush_io_rings(phba);

	hwq_count = phba->cfg_hdw_queue;

	for (i = 0; i < hwq_count; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		multixri_pool = qp->p_multixri_pool;
		if (!multixri_pool)
			continue;
		qp->p_multixri_pool = NULL;

		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock(&pbl_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
				pbl_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pbl_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pbl_pool->count--;
		}

		INIT_LIST_HEAD(&pbl_pool->list);
		pbl_pool->count = 0;

		spin_unlock(&pbl_pool->lock);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		spin_lock(&pvt_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
				pvt_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pvt_pool->count--;
		}

		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;

		spin_unlock(&pvt_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		kfree(multixri_pool);
	}
}
/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i, error = 0;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);

		/* Reestablish the local initiator port.
		 * The offline process destroyed the previous lport.
		 */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
		    !phba->nvmet_support) {
			error = lpfc_nvme_create_localport(phba->pport);
			if (error)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6132 NVME restore reg failed "
					"on nvmei error x%x\n", error);
		}
	} else {
		lpfc_sli_queue_init(phba);
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			clear_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				set_bit(FC_VPORT_NEEDS_REG_VPI,
					&vports[i]->fc_flag);
			if (phba->sli_rev == LPFC_SLI_REV4) {
				set_bit(FC_VPORT_NEEDS_INIT_VPI,
					&vports[i]->fc_flag);
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->cfg_xri_rebalancing)
		lpfc_create_multixri_pools(phba);

	lpfc_cpuhp_add(phba);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all the user space
 * access to the HBA, whether they are from sysfs interface or libdfc
 * interface will be allowed. The HBA is set to block the management interface
 * when the driver prepares the HBA interface for online or offline and then
 * set to unblock the management interface afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It performs
 * unregistration login to all the nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;
	int offline;
	bool hba_pci_err;

	if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	offline = pci_channel_offline(phba->pcidev);
	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
			set_bit(FC_VPORT_NEEDS_REG_VPI, &vports[i]->fc_flag);
			clear_bit(FC_VFI_REGISTERED, &vports[i]->fc_flag);

			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {

				spin_lock_irq(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(&ndlp->lock);

				if (offline || hba_pci_err) {
					spin_lock_irq(&ndlp->lock);
					ndlp->nlp_flag &= ~(NLP_UNREG_INP |
							    NLP_RPI_REGISTERED);
					spin_unlock_irq(&ndlp->lock);
					if (phba->sli_rev == LPFC_SLI_REV4)
						lpfc_sli_rpi_release(vports[i],
								     ndlp);
				} else {
					lpfc_unreg_rpi(vports[i], ndlp);
				}
				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4) {
					lpfc_printf_vlog(vports[i], KERN_INFO,
						 LOG_NODE | LOG_DISCOVERY,
						 "0011 Free RPI x%x on "
						 "ndlp: x%px did x%x\n",
						 ndlp->nlp_rpi, ndlp,
						 ndlp->nlp_DID);
					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
				}

				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);

					/* Don't remove the node unless the node
					 * has been unregistered with the
					 * transport, and we're not in recovery
					 * before dev_loss_tmo triggered.
					 * Otherwise, let dev_loss take care of
					 * the node.
					 */
					if (!(ndlp->save_flags &
					      NLP_IN_RECOV_POST_DEV_LOSS) &&
					    !(ndlp->fc4_xpt_flags &
					      (NVME_XPT_REGD | SCSI_XPT_REGD)))
						lpfc_disc_state_machine
							(vports[i], ndlp,
							 NULL,
							 NLP_EVT_DEVICE_RM);
				}
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int i;

	if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);

	/* Tear down the local and target port registrations. The
	 * nvme transports need to cleanup.
	 */
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(phba->pport);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup. The HBA is offline
	   now. */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			spin_unlock_irq(shost->host_lock);
			set_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
	 * in hba_unset
	 */
	if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
		__lpfc_cpuhp_remove(phba);

	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);
}
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *sb, *sb_next;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the IO buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
void
lpfc_io_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	struct lpfc_sli4_hdw_queue *qp;
	int idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		/* Release all the lpfc_nvme_bufs maintained by this host. */
		spin_lock(&qp->io_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->put_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_put_lock);

		spin_lock(&qp->io_buf_list_get_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_get,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->get_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_get_lock);
	}
}
4042 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4043 * @phba: pointer to lpfc hba data structure.
4045 * This routine first calculates the sizes of the current els and allocated
4046 * scsi sgl lists, and then goes through all sgls to updates the physical
4047 * XRIs assigned due to port function reset. During port initialization, the
4048 * current els and allocated scsi sgl lists are 0s.
4051 * 0 - successful (for now, it always returns 0)
4054 lpfc_sli4_els_sgl_update(struct lpfc_hba
*phba
)
4056 struct lpfc_sglq
*sglq_entry
= NULL
, *sglq_entry_next
= NULL
;
4057 uint16_t i
, lxri
, xri_cnt
, els_xri_cnt
;
4058 LIST_HEAD(els_sgl_list
);
4062 * update on pci function's els xri-sgl list
4064 els_xri_cnt
= lpfc_sli4_get_els_iocb_cnt(phba
);
4066 if (els_xri_cnt
> phba
->sli4_hba
.els_xri_cnt
) {
4067 /* els xri-sgl expanded */
4068 xri_cnt
= els_xri_cnt
- phba
->sli4_hba
.els_xri_cnt
;
4069 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4070 "3157 ELS xri-sgl count increased from "
4071 "%d to %d\n", phba
->sli4_hba
.els_xri_cnt
,
4073 /* allocate the additional els sgls */
4074 for (i
= 0; i
< xri_cnt
; i
++) {
4075 sglq_entry
= kzalloc(sizeof(struct lpfc_sglq
),
4077 if (sglq_entry
== NULL
) {
4078 lpfc_printf_log(phba
, KERN_ERR
,
4080 "2562 Failure to allocate an "
4081 "ELS sgl entry:%d\n", i
);
4085 sglq_entry
->buff_type
= GEN_BUFF_TYPE
;
4086 sglq_entry
->virt
= lpfc_mbuf_alloc(phba
, 0,
4088 if (sglq_entry
->virt
== NULL
) {
4090 lpfc_printf_log(phba
, KERN_ERR
,
4092 "2563 Failure to allocate an "
4093 "ELS mbuf:%d\n", i
);
4097 sglq_entry
->sgl
= sglq_entry
->virt
;
4098 memset(sglq_entry
->sgl
, 0, LPFC_BPL_SIZE
);
4099 sglq_entry
->state
= SGL_FREED
;
4100 list_add_tail(&sglq_entry
->list
, &els_sgl_list
);
4102 spin_lock_irq(&phba
->sli4_hba
.sgl_list_lock
);
4103 list_splice_init(&els_sgl_list
,
4104 &phba
->sli4_hba
.lpfc_els_sgl_list
);
4105 spin_unlock_irq(&phba
->sli4_hba
.sgl_list_lock
);
4106 } else if (els_xri_cnt
< phba
->sli4_hba
.els_xri_cnt
) {
4107 /* els xri-sgl shrinked */
4108 xri_cnt
= phba
->sli4_hba
.els_xri_cnt
- els_xri_cnt
;
4109 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4110 "3158 ELS xri-sgl count decreased from "
4111 "%d to %d\n", phba
->sli4_hba
.els_xri_cnt
,
4113 spin_lock_irq(&phba
->sli4_hba
.sgl_list_lock
);
4114 list_splice_init(&phba
->sli4_hba
.lpfc_els_sgl_list
,
4116 /* release extra els sgls from list */
4117 for (i
= 0; i
< xri_cnt
; i
++) {
4118 list_remove_head(&els_sgl_list
,
4119 sglq_entry
, struct lpfc_sglq
, list
);
4121 __lpfc_mbuf_free(phba
, sglq_entry
->virt
,
4126 list_splice_init(&els_sgl_list
,
4127 &phba
->sli4_hba
.lpfc_els_sgl_list
);
4128 spin_unlock_irq(&phba
->sli4_hba
.sgl_list_lock
);
4130 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4131 "3163 ELS xri-sgl count unchanged: %d\n",
4133 phba
->sli4_hba
.els_xri_cnt
= els_xri_cnt
;
4135 /* update xris to els sgls on the list */
4137 sglq_entry_next
= NULL
;
4138 list_for_each_entry_safe(sglq_entry
, sglq_entry_next
,
4139 &phba
->sli4_hba
.lpfc_els_sgl_list
, list
) {
4140 lxri
= lpfc_sli4_next_xritag(phba
);
4141 if (lxri
== NO_XRI
) {
4142 lpfc_printf_log(phba
, KERN_ERR
,
4144 "2400 Failed to allocate xri for "
4149 sglq_entry
->sli4_lxritag
= lxri
;
4150 sglq_entry
->sli4_xritag
= phba
->sli4_hba
.xri_ids
[lxri
];
4155 lpfc_free_els_sgl_list(phba
);
4160 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4161 * @phba: pointer to lpfc hba data structure.
4163 * This routine first calculates the sizes of the current els and allocated
4164 * scsi sgl lists, and then goes through all sgls to updates the physical
4165 * XRIs assigned due to port function reset. During port initialization, the
4166 * current els and allocated scsi sgl lists are 0s.
4169 * 0 - successful (for now, it always returns 0)
4172 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba
*phba
)
4174 struct lpfc_sglq
*sglq_entry
= NULL
, *sglq_entry_next
= NULL
;
4175 uint16_t i
, lxri
, xri_cnt
, els_xri_cnt
;
4176 uint16_t nvmet_xri_cnt
;
4177 LIST_HEAD(nvmet_sgl_list
);
4181 * update on pci function's nvmet xri-sgl list
4183 els_xri_cnt
= lpfc_sli4_get_els_iocb_cnt(phba
);
4185 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4186 nvmet_xri_cnt
= phba
->sli4_hba
.max_cfg_param
.max_xri
- els_xri_cnt
;
4187 if (nvmet_xri_cnt
> phba
->sli4_hba
.nvmet_xri_cnt
) {
4188 /* els xri-sgl expanded */
4189 xri_cnt
= nvmet_xri_cnt
- phba
->sli4_hba
.nvmet_xri_cnt
;
4190 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4191 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4192 phba
->sli4_hba
.nvmet_xri_cnt
, nvmet_xri_cnt
);
4193 /* allocate the additional nvmet sgls */
4194 for (i
= 0; i
< xri_cnt
; i
++) {
4195 sglq_entry
= kzalloc(sizeof(struct lpfc_sglq
),
4197 if (sglq_entry
== NULL
) {
4198 lpfc_printf_log(phba
, KERN_ERR
,
4200 "6303 Failure to allocate an "
4201 "NVMET sgl entry:%d\n", i
);
4205 sglq_entry
->buff_type
= NVMET_BUFF_TYPE
;
4206 sglq_entry
->virt
= lpfc_nvmet_buf_alloc(phba
, 0,
4208 if (sglq_entry
->virt
== NULL
) {
4210 lpfc_printf_log(phba
, KERN_ERR
,
4212 "6304 Failure to allocate an "
4213 "NVMET buf:%d\n", i
);
4217 sglq_entry
->sgl
= sglq_entry
->virt
;
4218 memset(sglq_entry
->sgl
, 0,
4219 phba
->cfg_sg_dma_buf_size
);
4220 sglq_entry
->state
= SGL_FREED
;
4221 list_add_tail(&sglq_entry
->list
, &nvmet_sgl_list
);
4223 spin_lock_irq(&phba
->hbalock
);
4224 spin_lock(&phba
->sli4_hba
.sgl_list_lock
);
4225 list_splice_init(&nvmet_sgl_list
,
4226 &phba
->sli4_hba
.lpfc_nvmet_sgl_list
);
4227 spin_unlock(&phba
->sli4_hba
.sgl_list_lock
);
4228 spin_unlock_irq(&phba
->hbalock
);
4229 } else if (nvmet_xri_cnt
< phba
->sli4_hba
.nvmet_xri_cnt
) {
4230 /* nvmet xri-sgl shrunk */
4231 xri_cnt
= phba
->sli4_hba
.nvmet_xri_cnt
- nvmet_xri_cnt
;
4232 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4233 "6305 NVMET xri-sgl count decreased from "
4234 "%d to %d\n", phba
->sli4_hba
.nvmet_xri_cnt
,
4236 spin_lock_irq(&phba
->hbalock
);
4237 spin_lock(&phba
->sli4_hba
.sgl_list_lock
);
4238 list_splice_init(&phba
->sli4_hba
.lpfc_nvmet_sgl_list
,
4240 /* release extra nvmet sgls from list */
4241 for (i
= 0; i
< xri_cnt
; i
++) {
4242 list_remove_head(&nvmet_sgl_list
,
4243 sglq_entry
, struct lpfc_sglq
, list
);
4245 lpfc_nvmet_buf_free(phba
, sglq_entry
->virt
,
4250 list_splice_init(&nvmet_sgl_list
,
4251 &phba
->sli4_hba
.lpfc_nvmet_sgl_list
);
4252 spin_unlock(&phba
->sli4_hba
.sgl_list_lock
);
4253 spin_unlock_irq(&phba
->hbalock
);
4255 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4256 "6306 NVMET xri-sgl count unchanged: %d\n",
4258 phba
->sli4_hba
.nvmet_xri_cnt
= nvmet_xri_cnt
;
4260 /* update xris to nvmet sgls on the list */
4262 sglq_entry_next
= NULL
;
4263 list_for_each_entry_safe(sglq_entry
, sglq_entry_next
,
4264 &phba
->sli4_hba
.lpfc_nvmet_sgl_list
, list
) {
4265 lxri
= lpfc_sli4_next_xritag(phba
);
4266 if (lxri
== NO_XRI
) {
4267 lpfc_printf_log(phba
, KERN_ERR
,
4269 "6307 Failed to allocate xri for "
4274 sglq_entry
->sli4_lxritag
= lxri
;
4275 sglq_entry
->sli4_xritag
= phba
->sli4_hba
.xri_ids
[lxri
];
4280 lpfc_free_nvmet_sgl_list(phba
);
4285 lpfc_io_buf_flush(struct lpfc_hba
*phba
, struct list_head
*cbuf
)
4288 struct lpfc_sli4_hdw_queue
*qp
;
4289 struct lpfc_io_buf
*lpfc_cmd
;
4290 struct lpfc_io_buf
*iobufp
, *prev_iobufp
;
4291 int idx
, cnt
, xri
, inserted
;
4294 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
4295 qp
= &phba
->sli4_hba
.hdwq
[idx
];
4296 spin_lock_irq(&qp
->io_buf_list_get_lock
);
4297 spin_lock(&qp
->io_buf_list_put_lock
);
4299 /* Take everything off the get and put lists */
4300 list_splice_init(&qp
->lpfc_io_buf_list_get
, &blist
);
4301 list_splice(&qp
->lpfc_io_buf_list_put
, &blist
);
4302 INIT_LIST_HEAD(&qp
->lpfc_io_buf_list_get
);
4303 INIT_LIST_HEAD(&qp
->lpfc_io_buf_list_put
);
4304 cnt
+= qp
->get_io_bufs
+ qp
->put_io_bufs
;
4305 qp
->get_io_bufs
= 0;
4306 qp
->put_io_bufs
= 0;
4307 qp
->total_io_bufs
= 0;
4308 spin_unlock(&qp
->io_buf_list_put_lock
);
4309 spin_unlock_irq(&qp
->io_buf_list_get_lock
);
4313 * Take IO buffers off blist and put on cbuf sorted by XRI.
4314 * This is because POST_SGL takes a sequential range of XRIs
4315 * to post to the firmware.
4317 for (idx
= 0; idx
< cnt
; idx
++) {
4318 list_remove_head(&blist
, lpfc_cmd
, struct lpfc_io_buf
, list
);
4322 list_add_tail(&lpfc_cmd
->list
, cbuf
);
4325 xri
= lpfc_cmd
->cur_iocbq
.sli4_xritag
;
4328 list_for_each_entry(iobufp
, cbuf
, list
) {
4329 if (xri
< iobufp
->cur_iocbq
.sli4_xritag
) {
4331 list_add(&lpfc_cmd
->list
,
4332 &prev_iobufp
->list
);
4334 list_add(&lpfc_cmd
->list
, cbuf
);
4338 prev_iobufp
= iobufp
;
4341 list_add_tail(&lpfc_cmd
->list
, cbuf
);
4347 lpfc_io_buf_replenish(struct lpfc_hba
*phba
, struct list_head
*cbuf
)
4349 struct lpfc_sli4_hdw_queue
*qp
;
4350 struct lpfc_io_buf
*lpfc_cmd
;
4352 unsigned long iflags
;
4354 qp
= phba
->sli4_hba
.hdwq
;
4356 while (!list_empty(cbuf
)) {
4357 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
4358 list_remove_head(cbuf
, lpfc_cmd
,
4359 struct lpfc_io_buf
, list
);
4363 qp
= &phba
->sli4_hba
.hdwq
[idx
];
4364 lpfc_cmd
->hdwq_no
= idx
;
4365 lpfc_cmd
->hdwq
= qp
;
4366 lpfc_cmd
->cur_iocbq
.cmd_cmpl
= NULL
;
4367 spin_lock_irqsave(&qp
->io_buf_list_put_lock
, iflags
);
4368 list_add_tail(&lpfc_cmd
->list
,
4369 &qp
->lpfc_io_buf_list_put
);
4371 qp
->total_io_bufs
++;
4372 spin_unlock_irqrestore(&qp
->io_buf_list_put_lock
,
4380 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4381 * @phba: pointer to lpfc hba data structure.
4383 * This routine first calculates the sizes of the current els and allocated
4384 * scsi sgl lists, and then goes through all sgls to updates the physical
4385 * XRIs assigned due to port function reset. During port initialization, the
4386 * current els and allocated scsi sgl lists are 0s.
4389 * 0 - successful (for now, it always returns 0)
4392 lpfc_sli4_io_sgl_update(struct lpfc_hba
*phba
)
4394 struct lpfc_io_buf
*lpfc_ncmd
= NULL
, *lpfc_ncmd_next
= NULL
;
4395 uint16_t i
, lxri
, els_xri_cnt
;
4396 uint16_t io_xri_cnt
, io_xri_max
;
4397 LIST_HEAD(io_sgl_list
);
4401 * update on pci function's allocated nvme xri-sgl list
4404 /* maximum number of xris available for nvme buffers */
4405 els_xri_cnt
= lpfc_sli4_get_els_iocb_cnt(phba
);
4406 io_xri_max
= phba
->sli4_hba
.max_cfg_param
.max_xri
- els_xri_cnt
;
4407 phba
->sli4_hba
.io_xri_max
= io_xri_max
;
4409 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4410 "6074 Current allocated XRI sgl count:%d, "
4411 "maximum XRI count:%d els_xri_cnt:%d\n\n",
4412 phba
->sli4_hba
.io_xri_cnt
,
4413 phba
->sli4_hba
.io_xri_max
,
4416 cnt
= lpfc_io_buf_flush(phba
, &io_sgl_list
);
4418 if (phba
->sli4_hba
.io_xri_cnt
> phba
->sli4_hba
.io_xri_max
) {
4419 /* max nvme xri shrunk below the allocated nvme buffers */
4420 io_xri_cnt
= phba
->sli4_hba
.io_xri_cnt
-
4421 phba
->sli4_hba
.io_xri_max
;
4422 /* release the extra allocated nvme buffers */
4423 for (i
= 0; i
< io_xri_cnt
; i
++) {
4424 list_remove_head(&io_sgl_list
, lpfc_ncmd
,
4425 struct lpfc_io_buf
, list
);
4427 dma_pool_free(phba
->lpfc_sg_dma_buf_pool
,
4429 lpfc_ncmd
->dma_handle
);
4433 phba
->sli4_hba
.io_xri_cnt
-= io_xri_cnt
;
4436 /* update xris associated to remaining allocated nvme buffers */
4438 lpfc_ncmd_next
= NULL
;
4439 phba
->sli4_hba
.io_xri_cnt
= cnt
;
4440 list_for_each_entry_safe(lpfc_ncmd
, lpfc_ncmd_next
,
4441 &io_sgl_list
, list
) {
4442 lxri
= lpfc_sli4_next_xritag(phba
);
4443 if (lxri
== NO_XRI
) {
4444 lpfc_printf_log(phba
, KERN_ERR
,
4446 "6075 Failed to allocate xri for "
4451 lpfc_ncmd
->cur_iocbq
.sli4_lxritag
= lxri
;
4452 lpfc_ncmd
->cur_iocbq
.sli4_xritag
= phba
->sli4_hba
.xri_ids
[lxri
];
4454 cnt
= lpfc_io_buf_replenish(phba
, &io_sgl_list
);
4463 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4464 * @phba: Pointer to lpfc hba data structure.
4465 * @num_to_alloc: The requested number of buffers to allocate.
4467 * This routine allocates nvme buffers for device with SLI-4 interface spec,
4468 * the nvme buffer contains all the necessary information needed to initiate
4469 * an I/O. After allocating up to @num_to_allocate IO buffers and put
4470 * them on a list, it post them to the port by using SGL block post.
4473 * int - number of IO buffers that were allocated and posted.
4474 * 0 = failure, less than num_to_alloc is a partial failure.
4477 lpfc_new_io_buf(struct lpfc_hba
*phba
, int num_to_alloc
)
4479 struct lpfc_io_buf
*lpfc_ncmd
;
4480 struct lpfc_iocbq
*pwqeq
;
4481 uint16_t iotag
, lxri
= 0;
4482 int bcnt
, num_posted
;
4483 LIST_HEAD(prep_nblist
);
4484 LIST_HEAD(post_nblist
);
4485 LIST_HEAD(nvme_nblist
);
4487 phba
->sli4_hba
.io_xri_cnt
= 0;
4488 for (bcnt
= 0; bcnt
< num_to_alloc
; bcnt
++) {
4489 lpfc_ncmd
= kzalloc(sizeof(*lpfc_ncmd
), GFP_KERNEL
);
4493 * Get memory from the pci pool to map the virt space to
4494 * pci bus space for an I/O. The DMA buffer includes the
4495 * number of SGE's necessary to support the sg_tablesize.
4497 lpfc_ncmd
->data
= dma_pool_zalloc(phba
->lpfc_sg_dma_buf_pool
,
4499 &lpfc_ncmd
->dma_handle
);
4500 if (!lpfc_ncmd
->data
) {
4505 if (phba
->cfg_xpsgl
&& !phba
->nvmet_support
) {
4506 INIT_LIST_HEAD(&lpfc_ncmd
->dma_sgl_xtra_list
);
4509 * 4K Page alignment is CRITICAL to BlockGuard, double
4512 if ((phba
->sli3_options
& LPFC_SLI3_BG_ENABLED
) &&
4513 (((unsigned long)(lpfc_ncmd
->data
) &
4514 (unsigned long)(SLI4_PAGE_SIZE
- 1)) != 0)) {
4515 lpfc_printf_log(phba
, KERN_ERR
,
4517 "3369 Memory alignment err: "
4519 (unsigned long)lpfc_ncmd
->data
);
4520 dma_pool_free(phba
->lpfc_sg_dma_buf_pool
,
4522 lpfc_ncmd
->dma_handle
);
4528 INIT_LIST_HEAD(&lpfc_ncmd
->dma_cmd_rsp_list
);
4530 lxri
= lpfc_sli4_next_xritag(phba
);
4531 if (lxri
== NO_XRI
) {
4532 dma_pool_free(phba
->lpfc_sg_dma_buf_pool
,
4533 lpfc_ncmd
->data
, lpfc_ncmd
->dma_handle
);
4537 pwqeq
= &lpfc_ncmd
->cur_iocbq
;
4539 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4540 iotag
= lpfc_sli_next_iotag(phba
, pwqeq
);
4542 dma_pool_free(phba
->lpfc_sg_dma_buf_pool
,
4543 lpfc_ncmd
->data
, lpfc_ncmd
->dma_handle
);
4545 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
4546 "6121 Failed to allocate IOTAG for"
4547 " XRI:0x%x\n", lxri
);
4548 lpfc_sli4_free_xri(phba
, lxri
);
4551 pwqeq
->sli4_lxritag
= lxri
;
4552 pwqeq
->sli4_xritag
= phba
->sli4_hba
.xri_ids
[lxri
];
4554 /* Initialize local short-hand pointers. */
4555 lpfc_ncmd
->dma_sgl
= lpfc_ncmd
->data
;
4556 lpfc_ncmd
->dma_phys_sgl
= lpfc_ncmd
->dma_handle
;
4557 lpfc_ncmd
->cur_iocbq
.io_buf
= lpfc_ncmd
;
4558 spin_lock_init(&lpfc_ncmd
->buf_lock
);
4560 /* add the nvme buffer to a post list */
4561 list_add_tail(&lpfc_ncmd
->list
, &post_nblist
);
4562 phba
->sli4_hba
.io_xri_cnt
++;
4564 lpfc_printf_log(phba
, KERN_INFO
, LOG_NVME
,
4565 "6114 Allocate %d out of %d requested new NVME "
4566 "buffers of size x%zu bytes\n", bcnt
, num_to_alloc
,
4567 sizeof(*lpfc_ncmd
));
	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_sli4_post_io_sgl_list(
				phba, &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}

static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;

	/* First get WWN of HBA instance */
	lpfc_read_nv(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6019 Mailbox failed , mbxCmd x%x "
				"READ_NV, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t) -1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
	/* wwn is WWPN of HBA instance */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		return be64_to_cpu(wwn);
	else
		return rol64(wwn, 32);
}
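
/*
 * Byte order note for lpfc_get_wwpn(): the raw portname bytes from READ_NV
 * are copied into a u64 unchanged.  For SLI4 ports the value is interpreted
 * as big-endian (be64_to_cpu), while for earlier SLI revisions the two
 * 32-bit halves are swapped with rol64(wwn, 32) instead.
 */
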
static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		if (phba->cfg_xpsgl && !phba->nvmet_support)
			return LPFC_MAX_SG_TABLESIZE;
		else
			return phba->cfg_scsi_seg_cnt;
	else
		return phba->cfg_sg_seg_cnt;
}
4627 * lpfc_vmid_res_alloc - Allocates resources for VMID
4628 * @phba: pointer to lpfc hba data structure.
4629 * @vport: pointer to vport data structure
4631 * This routine allocated the resources needed for the VMID.
4638 lpfc_vmid_res_alloc(struct lpfc_hba
*phba
, struct lpfc_vport
*vport
)
4640 /* VMID feature is supported only on SLI4 */
4641 if (phba
->sli_rev
== LPFC_SLI_REV3
) {
4642 phba
->cfg_vmid_app_header
= 0;
4643 phba
->cfg_vmid_priority_tagging
= 0;
4646 if (lpfc_is_vmid_enabled(phba
)) {
4648 kcalloc(phba
->cfg_max_vmid
, sizeof(struct lpfc_vmid
),
4653 rwlock_init(&vport
->vmid_lock
);
4655 /* Set the VMID parameters for the vport */
4656 vport
->vmid_priority_tagging
= phba
->cfg_vmid_priority_tagging
;
4657 vport
->vmid_inactivity_timeout
=
4658 phba
->cfg_vmid_inactivity_timeout
;
4659 vport
->max_vmid
= phba
->cfg_max_vmid
;
4660 vport
->cur_vmid_cnt
= 0;
4662 vport
->vmid_priority_range
= bitmap_zalloc
4663 (LPFC_VMID_MAX_PRIORITY_RANGE
, GFP_KERNEL
);
4665 if (!vport
->vmid_priority_range
) {
4670 hash_init(vport
->hash_table
);
4676 * lpfc_create_port - Create an FC port
4677 * @phba: pointer to lpfc hba data structure.
4678 * @instance: a unique integer ID to this FC port.
4679 * @dev: pointer to the device data structure.
4681 * This routine creates a FC port for the upper layer protocol. The FC port
4682 * can be created on top of either a physical port or a virtual port provided
4683 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4684 * and associates the FC port created before adding the shost into the SCSI
4688 * @vport - pointer to the virtual N_Port data structure.
4689 * NULL - port create failed.
4692 lpfc_create_port(struct lpfc_hba
*phba
, int instance
, struct device
*dev
)
4694 struct lpfc_vport
*vport
;
4695 struct Scsi_Host
*shost
= NULL
;
4696 struct scsi_host_template
*template;
4700 bool use_no_reset_hba
= false;
4704 if (lpfc_no_hba_reset_cnt
) {
4705 if (phba
->sli_rev
< LPFC_SLI_REV4
&&
4706 dev
== &phba
->pcidev
->dev
) {
4707 /* Reset the port first */
4708 lpfc_sli_brdrestart(phba
);
4709 rc
= lpfc_sli_chipset_init(phba
);
4713 wwn
= lpfc_get_wwpn(phba
);
4716 for (i
= 0; i
< lpfc_no_hba_reset_cnt
; i
++) {
4717 if (wwn
== lpfc_no_hba_reset
[i
]) {
4718 lpfc_printf_log(phba
, KERN_ERR
,
4720 "6020 Setting use_no_reset port=%llx\n",
4722 use_no_reset_hba
= true;
4727 /* Seed template for SCSI host registration */
4728 if (dev
== &phba
->pcidev
->dev
) {
4729 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_FCP
) {
4730 /* Seed physical port template */
4731 template = &lpfc_template
;
4733 if (use_no_reset_hba
)
4734 /* template is for a no reset SCSI Host */
4735 template->eh_host_reset_handler
= NULL
;
4737 /* Seed updated value of sg_tablesize */
4738 template->sg_tablesize
= lpfc_get_sg_tablesize(phba
);
4740 /* NVMET is for physical port only */
4741 template = &lpfc_template_nvme
;
4744 /* Seed vport template */
4745 template = &lpfc_vport_template
;
4747 /* Seed updated value of sg_tablesize */
4748 template->sg_tablesize
= lpfc_get_sg_tablesize(phba
);
4751 shost
= scsi_host_alloc(template, sizeof(struct lpfc_vport
));
4755 vport
= (struct lpfc_vport
*) shost
->hostdata
;
4757 set_bit(FC_LOADING
, &vport
->load_flag
);
4758 set_bit(FC_VPORT_NEEDS_REG_VPI
, &vport
->fc_flag
);
4759 vport
->fc_rscn_flush
= 0;
4760 atomic_set(&vport
->fc_plogi_cnt
, 0);
4761 atomic_set(&vport
->fc_adisc_cnt
, 0);
4762 atomic_set(&vport
->fc_reglogin_cnt
, 0);
4763 atomic_set(&vport
->fc_prli_cnt
, 0);
4764 atomic_set(&vport
->fc_unmap_cnt
, 0);
4765 atomic_set(&vport
->fc_map_cnt
, 0);
4766 atomic_set(&vport
->fc_npr_cnt
, 0);
4767 atomic_set(&vport
->fc_unused_cnt
, 0);
4768 lpfc_get_vport_cfgparam(vport
);
4770 /* Adjust value in vport */
4771 vport
->cfg_enable_fc4_type
= phba
->cfg_enable_fc4_type
;
4773 shost
->unique_id
= instance
;
4774 shost
->max_id
= LPFC_MAX_TARGET
;
4775 shost
->max_lun
= vport
->cfg_max_luns
;
4776 shost
->this_id
= -1;
4778 /* Set max_cmd_len applicable to ASIC support */
4779 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
4780 if_type
= bf_get(lpfc_sli_intf_if_type
,
4781 &phba
->sli4_hba
.sli_intf
);
4783 case LPFC_SLI_INTF_IF_TYPE_2
:
4785 case LPFC_SLI_INTF_IF_TYPE_6
:
4786 shost
->max_cmd_len
= LPFC_FCP_CDB_LEN_32
;
4789 shost
->max_cmd_len
= LPFC_FCP_CDB_LEN
;
4793 shost
->max_cmd_len
= LPFC_FCP_CDB_LEN
;
4796 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
4797 if (!phba
->cfg_fcp_mq_threshold
||
4798 phba
->cfg_fcp_mq_threshold
> phba
->cfg_hdw_queue
)
4799 phba
->cfg_fcp_mq_threshold
= phba
->cfg_hdw_queue
;
4801 shost
->nr_hw_queues
= min_t(int, 2 * num_possible_nodes(),
4802 phba
->cfg_fcp_mq_threshold
);
4804 shost
->dma_boundary
=
4805 phba
->sli4_hba
.pc_sli4_params
.sge_supp_len
-1;
4807 /* SLI-3 has a limited number of hardware queues (3),
4808 * thus there is only one for FCP processing.
4810 shost
->nr_hw_queues
= 1;
4813 * Set initial can_queue value since 0 is no longer supported and
4814 * scsi_add_host will fail. This will be adjusted later based on the
4815 * max xri value determined in hba setup.
4817 shost
->can_queue
= phba
->cfg_hba_queue_depth
- 10;
4818 if (dev
!= &phba
->pcidev
->dev
) {
4819 shost
->transportt
= lpfc_vport_transport_template
;
4820 vport
->port_type
= LPFC_NPIV_PORT
;
4822 shost
->transportt
= lpfc_transport_template
;
4823 vport
->port_type
= LPFC_PHYSICAL_PORT
;
4826 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
| LOG_FCP
,
4827 "9081 CreatePort TMPLATE type %x TBLsize %d "
4829 vport
->port_type
, shost
->sg_tablesize
,
4830 phba
->cfg_scsi_seg_cnt
, phba
->cfg_sg_seg_cnt
);
4832 /* Allocate the resources for VMID */
4833 rc
= lpfc_vmid_res_alloc(phba
, vport
);
4838 /* Initialize all internally managed lists. */
4839 INIT_LIST_HEAD(&vport
->fc_nodes
);
4840 spin_lock_init(&vport
->fc_nodes_list_lock
);
4841 INIT_LIST_HEAD(&vport
->rcv_buffer_list
);
4842 spin_lock_init(&vport
->work_port_lock
);
4844 timer_setup(&vport
->fc_disctmo
, lpfc_disc_timeout
, 0);
4846 timer_setup(&vport
->els_tmofunc
, lpfc_els_timeout
, 0);
4848 timer_setup(&vport
->delayed_disc_tmo
, lpfc_delayed_disc_tmo
, 0);
4850 if (phba
->sli3_options
& LPFC_SLI3_BG_ENABLED
)
4851 lpfc_setup_bg(phba
, shost
);
4853 error
= scsi_add_host_with_dma(shost
, dev
, &phba
->pcidev
->dev
);
4857 spin_lock_irq(&phba
->port_list_lock
);
4858 list_add_tail(&vport
->listentry
, &phba
->port_list
);
4859 spin_unlock_irq(&phba
->port_list_lock
);
4864 bitmap_free(vport
->vmid_priority_range
);
4866 scsi_host_put(shost
);
/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
static void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_cleanup(vport);
	return;
}
/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}
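
/*
 * Note: idr_alloc() is called with start = 0 and end = 0; an end of 0
 * means "no upper bound", so adapter instance numbers simply count up
 * from zero for the life of the module.
 */
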
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (test_bit(FC_UNLOADING, &vport->load_flag)) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (!atomic_read(&vport->fc_map_cnt) &&
	    time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	fc_host_supported_speeds(shost) = 0;
	/*
	 * Avoid reporting supported link speed for FCoE as it can't be
	 * controlled via FCoE.
	 */
	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag))
		return;

	if (phba->lmt & LMT_256Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
	if (phba->lmt & LMT_128Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
	if (phba->lmt & LMT_64Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
	if (phba->lmt & LMT_32Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
}
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				      sizeof fc_host_symbolic_name(shost));

	lpfc_host_supported_speeds_set(shost);

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	clear_bit(FC_LOADING, &vport->load_flag);
}
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when waiting for FCF table rediscover has been
 * timed out. If new FCF record(s) has (have) been discovered during the
 * wait period, a new FCF event shall be added to the FCOE async event
 * list, and then worker thread shall be waked up for processing from the
 * worker thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
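
/*
 * The wait-timer handler above runs in timer (softirq) context, so it only
 * flips FCF_REDISC_PEND to FCF_REDISC_EVT under hbalock and kicks the
 * worker thread; the actual FCF table rediscovery is carried out later from
 * the worker thread context.
 */
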
/**
 * lpfc_vmid_poll - VMID timeout detection
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when there is no I/O on by a VM for the specified
 * amount of time. When this situation is detected, the VMID has to be
 * deregistered from the switch and all the local resources freed. The VMID
 * will be reassigned to the VM once the I/O begins.
 **/
static void
lpfc_vmid_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
	u32 wake_up = 0;

	/* check if there is a need to issue QFPA */
	if (phba->pport->vmid_priority_tagging) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
	}

	/* Is the vmid inactivity timer enabled */
	if (phba->pport->vmid_inactivity_timeout ||
	    test_bit(FC_DEREGISTER_ALL_APP_ID, &phba->pport->load_flag)) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
	}

	if (wake_up)
		lpfc_worker_wake_up(phba);

	/* restart the timer for the next iteration */
	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
							LPFC_VMID_TIMER));
}
/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) {
	case LPFC_FC_LA_TYPE_LINK_DOWN:
	case LPFC_FC_LA_TYPE_TRUNKING_EVENT:
	case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
	case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
		break;
	default:
		switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
		case LPFC_ASYNC_LINK_FAULT_NONE:
		case LPFC_ASYNC_LINK_FAULT_LOCAL:
		case LPFC_ASYNC_LINK_FAULT_REMOTE:
		case LPFC_ASYNC_LINK_FAULT_LR_LRR:
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0398 Unknown link fault code: x%x\n",
					bf_get(lpfc_acqe_link_fault,
					       acqe_link));
			break;
		}
		break;
	}
}
/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}
5274 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
5275 * @phba: pointer to lpfc hba data structure.
5277 * This routine is to get an SLI3 FC port's link speed in Mbps.
5279 * Return: link speed in terms of Mbps.
5282 lpfc_sli_port_speed_get(struct lpfc_hba
*phba
)
5284 uint32_t link_speed
;
5286 if (!lpfc_is_link_up(phba
))
5289 if (phba
->sli_rev
<= LPFC_SLI_REV3
) {
5290 switch (phba
->fc_linkspeed
) {
5291 case LPFC_LINK_SPEED_1GHZ
:
5294 case LPFC_LINK_SPEED_2GHZ
:
5297 case LPFC_LINK_SPEED_4GHZ
:
5300 case LPFC_LINK_SPEED_8GHZ
:
5303 case LPFC_LINK_SPEED_10GHZ
:
5306 case LPFC_LINK_SPEED_16GHZ
:
5313 if (phba
->sli4_hba
.link_state
.logical_speed
)
5315 phba
->sli4_hba
.link_state
.logical_speed
;
5317 link_speed
= phba
->sli4_hba
.link_state
.speed
;
5323 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5324 * @phba: pointer to lpfc hba data structure.
5325 * @evt_code: asynchronous event code.
5326 * @speed_code: asynchronous event link speed code.
5328 * This routine is to parse the giving SLI4 async event link speed code into
5329 * value of Mbps for the link speed.
5331 * Return: link speed in terms of Mbps.
5334 lpfc_sli4_port_speed_parse(struct lpfc_hba
*phba
, uint32_t evt_code
,
5337 uint32_t port_speed
;
5340 case LPFC_TRAILER_CODE_LINK
:
5341 switch (speed_code
) {
5342 case LPFC_ASYNC_LINK_SPEED_ZERO
:
5345 case LPFC_ASYNC_LINK_SPEED_10MBPS
:
5348 case LPFC_ASYNC_LINK_SPEED_100MBPS
:
5351 case LPFC_ASYNC_LINK_SPEED_1GBPS
:
5354 case LPFC_ASYNC_LINK_SPEED_10GBPS
:
5357 case LPFC_ASYNC_LINK_SPEED_20GBPS
:
5360 case LPFC_ASYNC_LINK_SPEED_25GBPS
:
5363 case LPFC_ASYNC_LINK_SPEED_40GBPS
:
5366 case LPFC_ASYNC_LINK_SPEED_100GBPS
:
5367 port_speed
= 100000;
5373 case LPFC_TRAILER_CODE_FC
:
5374 switch (speed_code
) {
5375 case LPFC_FC_LA_SPEED_UNKNOWN
:
5378 case LPFC_FC_LA_SPEED_1G
:
5381 case LPFC_FC_LA_SPEED_2G
:
5384 case LPFC_FC_LA_SPEED_4G
:
5387 case LPFC_FC_LA_SPEED_8G
:
5390 case LPFC_FC_LA_SPEED_10G
:
5393 case LPFC_FC_LA_SPEED_16G
:
5396 case LPFC_FC_LA_SPEED_32G
:
5399 case LPFC_FC_LA_SPEED_64G
:
5402 case LPFC_FC_LA_SPEED_128G
:
5403 port_speed
= 128000;
5405 case LPFC_FC_LA_SPEED_256G
:
5406 port_speed
= 256000;
5419 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5420 * @phba: pointer to lpfc hba data structure.
5421 * @acqe_link: pointer to the async link completion queue entry.
5423 * This routine is to handle the SLI4 asynchronous FCoE link event.
5426 lpfc_sli4_async_link_evt(struct lpfc_hba
*phba
,
5427 struct lpfc_acqe_link
*acqe_link
)
5431 struct lpfc_mbx_read_top
*la
;
5435 att_type
= lpfc_sli4_parse_latt_type(phba
, acqe_link
);
5436 if (att_type
!= LPFC_ATT_LINK_DOWN
&& att_type
!= LPFC_ATT_LINK_UP
)
5438 phba
->fcoe_eventtag
= acqe_link
->event_tag
;
5439 pmb
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5441 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5442 "0395 The mboxq allocation failed\n");
5446 rc
= lpfc_mbox_rsrc_prep(phba
, pmb
);
5448 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5449 "0396 mailbox allocation failed\n");
5453 /* Cleanup any outstanding ELS commands */
5454 lpfc_els_flush_all_cmd(phba
);
5456 /* Block ELS IOCBs until we have done process link event */
5457 phba
->sli4_hba
.els_wq
->pring
->flag
|= LPFC_STOP_IOCB_EVENT
;
5459 /* Update link event statistics */
5460 phba
->sli
.slistat
.link_event
++;
5462 /* Create lpfc_handle_latt mailbox command from link ACQE */
5463 lpfc_read_topology(phba
, pmb
, pmb
->ctx_buf
);
5464 pmb
->mbox_cmpl
= lpfc_mbx_cmpl_read_topology
;
5465 pmb
->vport
= phba
->pport
;
5467 /* Keep the link status for extra SLI4 state machine reference */
5468 phba
->sli4_hba
.link_state
.speed
=
5469 lpfc_sli4_port_speed_parse(phba
, LPFC_TRAILER_CODE_LINK
,
5470 bf_get(lpfc_acqe_link_speed
, acqe_link
));
5471 phba
->sli4_hba
.link_state
.duplex
=
5472 bf_get(lpfc_acqe_link_duplex
, acqe_link
);
5473 phba
->sli4_hba
.link_state
.status
=
5474 bf_get(lpfc_acqe_link_status
, acqe_link
);
5475 phba
->sli4_hba
.link_state
.type
=
5476 bf_get(lpfc_acqe_link_type
, acqe_link
);
5477 phba
->sli4_hba
.link_state
.number
=
5478 bf_get(lpfc_acqe_link_number
, acqe_link
);
5479 phba
->sli4_hba
.link_state
.fault
=
5480 bf_get(lpfc_acqe_link_fault
, acqe_link
);
5481 phba
->sli4_hba
.link_state
.logical_speed
=
5482 bf_get(lpfc_acqe_logical_link_speed
, acqe_link
) * 10;
5484 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
5485 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5486 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5487 "Logical speed:%dMbps Fault:%d\n",
5488 phba
->sli4_hba
.link_state
.speed
,
5489 phba
->sli4_hba
.link_state
.topology
,
5490 phba
->sli4_hba
.link_state
.status
,
5491 phba
->sli4_hba
.link_state
.type
,
5492 phba
->sli4_hba
.link_state
.number
,
5493 phba
->sli4_hba
.link_state
.logical_speed
,
5494 phba
->sli4_hba
.link_state
.fault
);
5496 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5497 * topology info. Note: Optional for non FC-AL ports.
5499 if (!test_bit(HBA_FCOE_MODE
, &phba
->hba_flag
)) {
5500 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
5501 if (rc
== MBX_NOT_FINISHED
)
5506 * For FCoE Mode: fill in all the topology information we need and call
5507 * the READ_TOPOLOGY completion routine to continue without actually
5508 * sending the READ_TOPOLOGY mailbox command to the port.
5510 /* Initialize completion status */
5512 mb
->mbxStatus
= MBX_SUCCESS
;
5514 /* Parse port fault information field */
5515 lpfc_sli4_parse_latt_fault(phba
, acqe_link
);
5517 /* Parse and translate link attention fields */
5518 la
= (struct lpfc_mbx_read_top
*) &pmb
->u
.mb
.un
.varReadTop
;
5519 la
->eventTag
= acqe_link
->event_tag
;
5520 bf_set(lpfc_mbx_read_top_att_type
, la
, att_type
);
5521 bf_set(lpfc_mbx_read_top_link_spd
, la
,
5522 (bf_get(lpfc_acqe_link_speed
, acqe_link
)));
5524 /* Fake the following irrelevant fields */
5525 bf_set(lpfc_mbx_read_top_topology
, la
, LPFC_TOPOLOGY_PT_PT
);
5526 bf_set(lpfc_mbx_read_top_alpa_granted
, la
, 0);
5527 bf_set(lpfc_mbx_read_top_il
, la
, 0);
5528 bf_set(lpfc_mbx_read_top_pb
, la
, 0);
5529 bf_set(lpfc_mbx_read_top_fa
, la
, 0);
5530 bf_set(lpfc_mbx_read_top_mm
, la
, 0);
5532 /* Invoke the lpfc_handle_latt mailbox command callback function */
5533 lpfc_mbx_cmpl_read_topology(phba
, pmb
);
5538 lpfc_mbox_rsrc_cleanup(phba
, pmb
, MBOX_THD_UNLOCKED
);
5542 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5544 * @phba: pointer to lpfc hba data structure.
5545 * @speed_code: asynchronous event link speed code.
5547 * This routine is to parse the giving SLI4 async event link speed code into
5548 * value of Read topology link speed.
5550 * Return: link speed in terms of Read topology.
5553 lpfc_async_link_speed_to_read_top(struct lpfc_hba
*phba
, uint8_t speed_code
)
5557 switch (speed_code
) {
5558 case LPFC_FC_LA_SPEED_1G
:
5559 port_speed
= LPFC_LINK_SPEED_1GHZ
;
5561 case LPFC_FC_LA_SPEED_2G
:
5562 port_speed
= LPFC_LINK_SPEED_2GHZ
;
5564 case LPFC_FC_LA_SPEED_4G
:
5565 port_speed
= LPFC_LINK_SPEED_4GHZ
;
5567 case LPFC_FC_LA_SPEED_8G
:
5568 port_speed
= LPFC_LINK_SPEED_8GHZ
;
5570 case LPFC_FC_LA_SPEED_16G
:
5571 port_speed
= LPFC_LINK_SPEED_16GHZ
;
5573 case LPFC_FC_LA_SPEED_32G
:
5574 port_speed
= LPFC_LINK_SPEED_32GHZ
;
5576 case LPFC_FC_LA_SPEED_64G
:
5577 port_speed
= LPFC_LINK_SPEED_64GHZ
;
5579 case LPFC_FC_LA_SPEED_128G
:
5580 port_speed
= LPFC_LINK_SPEED_128GHZ
;
5582 case LPFC_FC_LA_SPEED_256G
:
5583 port_speed
= LPFC_LINK_SPEED_256GHZ
;
void
lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
{
	if (!phba->rx_monitor) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"4411 Rx Monitor Info is empty.\n");
	} else {
		lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
				       LPFC_MAX_RXMONITOR_DUMP);
	}
}
5606 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5607 * @phba: pointer to lpfc hba data structure.
5608 * @dtag: FPIN descriptor received
5610 * Increment the FPIN received counter/time when it happens.
5613 lpfc_cgn_update_stat(struct lpfc_hba
*phba
, uint32_t dtag
)
5615 struct lpfc_cgn_info
*cp
;
5618 /* Make sure we have a congestion info buffer */
5621 cp
= (struct lpfc_cgn_info
*)phba
->cgn_i
->virt
;
5623 /* Update congestion statistics */
5625 case ELS_DTAG_LNK_INTEGRITY
:
5626 le32_add_cpu(&cp
->link_integ_notification
, 1);
5627 lpfc_cgn_update_tstamp(phba
, &cp
->stat_lnk
);
5629 case ELS_DTAG_DELIVERY
:
5630 le32_add_cpu(&cp
->delivery_notification
, 1);
5631 lpfc_cgn_update_tstamp(phba
, &cp
->stat_delivery
);
5633 case ELS_DTAG_PEER_CONGEST
:
5634 le32_add_cpu(&cp
->cgn_peer_notification
, 1);
5635 lpfc_cgn_update_tstamp(phba
, &cp
->stat_peer
);
5637 case ELS_DTAG_CONGESTION
:
5638 le32_add_cpu(&cp
->cgn_notification
, 1);
5639 lpfc_cgn_update_tstamp(phba
, &cp
->stat_fpin
);
5641 if (phba
->cgn_fpin_frequency
&&
5642 phba
->cgn_fpin_frequency
!= LPFC_FPIN_INIT_FREQ
) {
5643 value
= LPFC_CGN_TIMER_TO_MIN
/ phba
->cgn_fpin_frequency
;
5644 cp
->cgn_stat_npm
= value
;
5647 value
= lpfc_cgn_calc_crc32(cp
, LPFC_CGN_INFO_SZ
,
5648 LPFC_CGN_CRC32_SEED
);
5649 cp
->cgn_info_crc
= cpu_to_le32(value
);
/**
 * lpfc_cgn_update_tstamp - Update cmf timestamp
 * @phba: pointer to lpfc hba data structure.
 * @ts: structure to write the timestamp to.
 **/
void
lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts)
{
	struct timespec64 cur_time;
	struct tm tm_val;

	ktime_get_real_ts64(&cur_time);
	time64_to_tm(cur_time.tv_sec, 0, &tm_val);

	ts->month = tm_val.tm_mon + 1;
	ts->day = tm_val.tm_mday;
	ts->year = tm_val.tm_year - 100;
	ts->hour = tm_val.tm_hour;
	ts->minute = tm_val.tm_min;
	ts->second = tm_val.tm_sec;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"2646 Updated CMF timestamp : "
			"%u/%u/%u %u:%u:%u\n",
			ts->day, ts->month,
			ts->year, ts->hour,
			ts->minute, ts->second);
}
5682 * lpfc_cmf_stats_timer - Save data into registered congestion buffer
5683 * @timer: Timer cookie to access lpfc private data
5685 * Save the congestion event data every minute.
5686 * On the hour collapse all the minute data into hour data. Every day
5687 * collapse all the hour data into daily data. Separate driver
5688 * and fabrc congestion event counters that will be saved out
5689 * to the registered congestion buffer every minute.
5691 static enum hrtimer_restart
5692 lpfc_cmf_stats_timer(struct hrtimer
*timer
)
5694 struct lpfc_hba
*phba
;
5695 struct lpfc_cgn_info
*cp
;
5697 uint16_t value
, mvalue
;
5700 uint32_t dvalue
, wvalue
, lvalue
, avalue
;
5706 phba
= container_of(timer
, struct lpfc_hba
, cmf_stats_timer
);
5707 /* Make sure we have a congestion info buffer */
5709 return HRTIMER_NORESTART
;
5710 cp
= (struct lpfc_cgn_info
*)phba
->cgn_i
->virt
;
5712 phba
->cgn_evt_timestamp
= jiffies
+
5713 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN
);
5714 phba
->cgn_evt_minute
++;
5716 /* We should get to this point in the routine on 1 minute intervals */
5717 lpfc_cgn_update_tstamp(phba
, &cp
->base_time
);
5719 if (phba
->cgn_fpin_frequency
&&
5720 phba
->cgn_fpin_frequency
!= LPFC_FPIN_INIT_FREQ
) {
5721 value
= LPFC_CGN_TIMER_TO_MIN
/ phba
->cgn_fpin_frequency
;
5722 cp
->cgn_stat_npm
= value
;
5725 /* Read and clear the latency counters for this minute */
5726 lvalue
= atomic_read(&phba
->cgn_latency_evt_cnt
);
5727 latsum
= atomic64_read(&phba
->cgn_latency_evt
);
5728 atomic_set(&phba
->cgn_latency_evt_cnt
, 0);
5729 atomic64_set(&phba
->cgn_latency_evt
, 0);
5731 /* We need to store MB/sec bandwidth in the congestion information.
5732 * block_cnt is count of 512 byte blocks for the entire minute,
5733 * bps will get bytes per sec before finally converting to MB/sec.
5735 bps
= div_u64(phba
->rx_block_cnt
, LPFC_SEC_MIN
) * 512;
5736 phba
->rx_block_cnt
= 0;
5737 mvalue
= bps
/ (1024 * 1024); /* convert to MB/sec */
5740 /* cgn parameters */
5741 cp
->cgn_info_mode
= phba
->cgn_p
.cgn_param_mode
;
5742 cp
->cgn_info_level0
= phba
->cgn_p
.cgn_param_level0
;
5743 cp
->cgn_info_level1
= phba
->cgn_p
.cgn_param_level1
;
5744 cp
->cgn_info_level2
= phba
->cgn_p
.cgn_param_level2
;
5746 /* Fill in default LUN qdepth */
5747 value
= (uint16_t)(phba
->pport
->cfg_lun_queue_depth
);
5748 cp
->cgn_lunq
= cpu_to_le16(value
);
5750 /* Record congestion buffer info - every minute
5751 * cgn_driver_evt_cnt (Driver events)
5752 * cgn_fabric_warn_cnt (Congestion Warnings)
5753 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5754 * cgn_fabric_alarm_cnt (Congestion Alarms)
5756 index
= ++cp
->cgn_index_minute
;
5757 if (cp
->cgn_index_minute
== LPFC_MIN_HOUR
) {
5758 cp
->cgn_index_minute
= 0;
5762 /* Get the number of driver events in this sample and reset counter */
5763 dvalue
= atomic_read(&phba
->cgn_driver_evt_cnt
);
5764 atomic_set(&phba
->cgn_driver_evt_cnt
, 0);
5766 /* Get the number of warning events - FPIN and Signal for this minute */
5768 if ((phba
->cgn_reg_fpin
& LPFC_CGN_FPIN_WARN
) ||
5769 phba
->cgn_reg_signal
== EDC_CG_SIG_WARN_ONLY
||
5770 phba
->cgn_reg_signal
== EDC_CG_SIG_WARN_ALARM
)
5771 wvalue
= atomic_read(&phba
->cgn_fabric_warn_cnt
);
5772 atomic_set(&phba
->cgn_fabric_warn_cnt
, 0);
5774 /* Get the number of alarm events - FPIN and Signal for this minute */
5776 if ((phba
->cgn_reg_fpin
& LPFC_CGN_FPIN_ALARM
) ||
5777 phba
->cgn_reg_signal
== EDC_CG_SIG_WARN_ALARM
)
5778 avalue
= atomic_read(&phba
->cgn_fabric_alarm_cnt
);
5779 atomic_set(&phba
->cgn_fabric_alarm_cnt
, 0);
5781 /* Collect the driver, warning, alarm and latency counts for this
5782 * minute into the driver congestion buffer.
5784 ptr
= &cp
->cgn_drvr_min
[index
];
5785 value
= (uint16_t)dvalue
;
5786 *ptr
= cpu_to_le16(value
);
5788 ptr
= &cp
->cgn_warn_min
[index
];
5789 value
= (uint16_t)wvalue
;
5790 *ptr
= cpu_to_le16(value
);
5792 ptr
= &cp
->cgn_alarm_min
[index
];
5793 value
= (uint16_t)avalue
;
5794 *ptr
= cpu_to_le16(value
);
5796 lptr
= &cp
->cgn_latency_min
[index
];
5798 lvalue
= (uint32_t)div_u64(latsum
, lvalue
);
5799 *lptr
= cpu_to_le32(lvalue
);
5804 /* Collect the bandwidth value into the driver's congesion buffer. */
5805 mptr
= &cp
->cgn_bw_min
[index
];
5806 *mptr
= cpu_to_le16(mvalue
);
5808 lpfc_printf_log(phba
, KERN_INFO
, LOG_CGN_MGMT
,
5809 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5810 index
, dvalue
, wvalue
, *lptr
, mvalue
, avalue
);
5813 if ((phba
->cgn_evt_minute
% LPFC_MIN_HOUR
) == 0) {
5814 /* Record congestion buffer info - every hour
5815 * Collapse all minutes into an hour
5817 index
= ++cp
->cgn_index_hour
;
5818 if (cp
->cgn_index_hour
== LPFC_HOUR_DAY
) {
5819 cp
->cgn_index_hour
= 0;
5829 for (i
= 0; i
< LPFC_MIN_HOUR
; i
++) {
5830 dvalue
+= le16_to_cpu(cp
->cgn_drvr_min
[i
]);
5831 wvalue
+= le16_to_cpu(cp
->cgn_warn_min
[i
]);
5832 lvalue
+= le32_to_cpu(cp
->cgn_latency_min
[i
]);
5833 mbps
+= le16_to_cpu(cp
->cgn_bw_min
[i
]);
5834 avalue
+= le16_to_cpu(cp
->cgn_alarm_min
[i
]);
5836 if (lvalue
) /* Avg of latency averages */
5837 lvalue
/= LPFC_MIN_HOUR
;
5838 if (mbps
) /* Avg of Bandwidth averages */
5839 mvalue
= mbps
/ LPFC_MIN_HOUR
;
5841 lptr
= &cp
->cgn_drvr_hr
[index
];
5842 *lptr
= cpu_to_le32(dvalue
);
5843 lptr
= &cp
->cgn_warn_hr
[index
];
5844 *lptr
= cpu_to_le32(wvalue
);
5845 lptr
= &cp
->cgn_latency_hr
[index
];
5846 *lptr
= cpu_to_le32(lvalue
);
5847 mptr
= &cp
->cgn_bw_hr
[index
];
5848 *mptr
= cpu_to_le16(mvalue
);
5849 lptr
= &cp
->cgn_alarm_hr
[index
];
5850 *lptr
= cpu_to_le32(avalue
);
5852 lpfc_printf_log(phba
, KERN_INFO
, LOG_CGN_MGMT
,
5853 "2419 Congestion Info - hour "
5854 "(%d): %d %d %d %d %d\n",
5855 index
, dvalue
, wvalue
, lvalue
, mvalue
, avalue
);
5859 if ((phba
->cgn_evt_minute
% LPFC_MIN_DAY
) == 0) {
5860 /* Record congestion buffer info - every hour
5861 * Collapse all hours into a day. Rotate days
5862 * after LPFC_MAX_CGN_DAYS.
5864 index
= ++cp
->cgn_index_day
;
5865 if (cp
->cgn_index_day
== LPFC_MAX_CGN_DAYS
) {
5866 cp
->cgn_index_day
= 0;
5876 for (i
= 0; i
< LPFC_HOUR_DAY
; i
++) {
5877 dvalue
+= le32_to_cpu(cp
->cgn_drvr_hr
[i
]);
5878 wvalue
+= le32_to_cpu(cp
->cgn_warn_hr
[i
]);
5879 lvalue
+= le32_to_cpu(cp
->cgn_latency_hr
[i
]);
5880 mbps
+= le16_to_cpu(cp
->cgn_bw_hr
[i
]);
5881 avalue
+= le32_to_cpu(cp
->cgn_alarm_hr
[i
]);
5883 if (lvalue
) /* Avg of latency averages */
5884 lvalue
/= LPFC_HOUR_DAY
;
5885 if (mbps
) /* Avg of Bandwidth averages */
5886 mvalue
= mbps
/ LPFC_HOUR_DAY
;
5888 lptr
= &cp
->cgn_drvr_day
[index
];
5889 *lptr
= cpu_to_le32(dvalue
);
5890 lptr
= &cp
->cgn_warn_day
[index
];
5891 *lptr
= cpu_to_le32(wvalue
);
5892 lptr
= &cp
->cgn_latency_day
[index
];
5893 *lptr
= cpu_to_le32(lvalue
);
5894 mptr
= &cp
->cgn_bw_day
[index
];
5895 *mptr
= cpu_to_le16(mvalue
);
5896 lptr
= &cp
->cgn_alarm_day
[index
];
5897 *lptr
= cpu_to_le32(avalue
);
5899 lpfc_printf_log(phba
, KERN_INFO
, LOG_CGN_MGMT
,
5900 "2420 Congestion Info - daily (%d): "
5902 index
, dvalue
, wvalue
, lvalue
, mvalue
, avalue
);
5905 /* Use the frequency found in the last rcv'ed FPIN */
5906 value
= phba
->cgn_fpin_frequency
;
5907 cp
->cgn_warn_freq
= cpu_to_le16(value
);
5908 cp
->cgn_alarm_freq
= cpu_to_le16(value
);
5910 lvalue
= lpfc_cgn_calc_crc32(cp
, LPFC_CGN_INFO_SZ
,
5911 LPFC_CGN_CRC32_SEED
);
5912 cp
->cgn_info_crc
= cpu_to_le32(lvalue
);
5914 hrtimer_forward_now(timer
, ktime_set(0, LPFC_SEC_MIN
* NSEC_PER_SEC
));
5916 return HRTIMER_RESTART
;
/**
 * lpfc_calc_cmf_latency - latency from start of rxate timer interval
 * @phba: The Hba for which this call is being executed.
 *
 * The routine calculates the latency from the beginning of the CMF timer
 * interval to the current point in time. It is called from IO completion
 * when we exceed our Bandwidth limitation for the time interval.
 **/
uint32_t
lpfc_calc_cmf_latency(struct lpfc_hba *phba)
{
	struct timespec64 cmpl_time;
	uint32_t msec = 0;

	ktime_get_real_ts64(&cmpl_time);

	/* This routine works on a ms granularity so sec and usec are
	 * converted accordingly.
	 */
	if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
		msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
			NSEC_PER_MSEC;
	} else {
		if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
			msec = (cmpl_time.tv_sec -
				phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
			msec += ((cmpl_time.tv_nsec -
				  phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
		} else {
			msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
				1) * MSEC_PER_SEC;
			msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
				 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
		}
	}
	return msec;
}
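
/*
 * Worked example for the rollover branch above: with an interval start of
 * 9.900000000 s and a completion at 10.250000000 s, tv_sec differs and the
 * completion nsec (250000000) is less than the start nsec (900000000), so
 * msec = (10 - 9 - 1) * 1000
 *        + ((1000000000 - 900000000) + 250000000) / 1000000
 *      = 0 + 350 = 350 ms, i.e. the expected 0.35 s.
 */
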
/**
 * lpfc_cmf_timer - This is the timer function for one congestion
 * rate interval.
 * @timer: Pointer to the high resolution timer that expired
 */
static enum hrtimer_restart
lpfc_cmf_timer(struct hrtimer *timer)
{
	struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
					     cmf_timer);
	struct rx_info_entry entry;
	uint32_t io_cnt;
	uint32_t busy, max_read;
	uint64_t total, rcv, lat, mbpi, extra, cnt;
	int timer_interval = LPFC_CMF_INTERVAL;
	uint32_t ms;
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* Only restart the timer if congestion mgmt is on */
	if (phba->cmf_active_mode == LPFC_CFG_OFF ||
	    !phba->cmf_latency.tv_sec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6224 CMF timer exit: %d %lld\n",
				phba->cmf_active_mode,
				(uint64_t)phba->cmf_latency.tv_sec);
		return HRTIMER_NORESTART;
	}

	/* If pport is not ready yet, just exit and wait for
	 * the next timer cycle to hit.
	 */
	if (!phba->pport)
		goto skip;

	/* Do not block SCSI IO while in the timer routine since
	 * total_bytes will be cleared
	 */
	atomic_set(&phba->cmf_stop_io, 1);

	/* First we need to calculate the actual ms between
	 * the last timer interrupt and this one. We ask for
	 * LPFC_CMF_INTERVAL, however the actual time may
	 * vary depending on system overhead.
	 */
	ms = lpfc_calc_cmf_latency(phba);

	/* Immediately after we calculate the time since the last
	 * timer interrupt, set the start time for the next
	 * interrupt
	 */
	ktime_get_real_ts64(&phba->cmf_latency);

	phba->cmf_link_byte_count =
		div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);

	/* Collect all the stats from the prior timer interval */
	total = 0;
	io_cnt = 0;
	lat = 0;
	rcv = 0;
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		total += atomic64_xchg(&cgs->total_bytes, 0);
		io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
		lat += atomic64_xchg(&cgs->rx_latency, 0);
		rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
	}

	/* Before we issue another CMF_SYNC_WQE, retrieve the BW
	 * returned from the last CMF_SYNC_WQE issued, from
	 * cmf_last_sync_bw. This will be the target BW for
	 * this next timer interval.
	 */
	extra = 0;
	if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
	    phba->link_state != LPFC_LINK_DOWN &&
	    test_bit(HBA_SETUP, &phba->hba_flag)) {
		mbpi = phba->cmf_last_sync_bw;
		phba->cmf_last_sync_bw = 0;

		/* Calculate any extra bytes needed to account for the
		 * timer accuracy. If we are less than LPFC_CMF_INTERVAL
		 * calculate the adjustment needed for total to reflect
		 * a full LPFC_CMF_INTERVAL.
		 */
		if (ms && ms < LPFC_CMF_INTERVAL) {
			cnt = div_u64(total, ms); /* bytes per ms */
			cnt *= LPFC_CMF_INTERVAL; /* what total should be */
			extra = cnt - total;
		}
		lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
	} else {
		/* For Monitor mode or link down we want mbpi
		 * to be the full link speed
		 */
		mbpi = phba->cmf_link_byte_count;
	}
	phba->cmf_timer_cnt++;

	/* Update congestion info buffer latency in us */
	atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
	atomic64_add(lat, &phba->cgn_latency_evt);

	busy = atomic_xchg(&phba->cmf_busy, 0);
	max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);

	/* Calculate MBPI for the next timer interval */
	if (mbpi > phba->cmf_link_byte_count ||
	    phba->cmf_active_mode == LPFC_CFG_MONITOR)
		mbpi = phba->cmf_link_byte_count;

	/* Change max_bytes_per_interval to what the prior
	 * CMF_SYNC_WQE cmpl indicated.
	 */
	if (mbpi != phba->cmf_max_bytes_per_interval)
		phba->cmf_max_bytes_per_interval = mbpi;

	/* Save rxmonitor information for debug */
	if (phba->rx_monitor) {
		entry.total_bytes = total;
		entry.cmf_bytes = total + extra;
		entry.rcv_bytes = rcv;
		entry.cmf_busy = busy;
		entry.cmf_info = phba->cmf_active_info;
		if (io_cnt) {
			entry.avg_io_latency = div_u64(lat, io_cnt);
			entry.avg_io_size = div_u64(rcv, io_cnt);
		} else {
			entry.avg_io_latency = 0;
			entry.avg_io_size = 0;
		}
		entry.max_read_cnt = max_read;
		entry.io_cnt = io_cnt;
		entry.max_bytes_per_interval = mbpi;
		if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
			entry.timer_utilization = phba->cmf_last_ts;
		else
			entry.timer_utilization = ms;
		entry.timer_interval = ms;
		phba->cmf_last_ts = 0;

		lpfc_rx_monitor_record(phba->rx_monitor, &entry);
	}

	if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
		/* If Monitor mode, check if we are oversubscribed
		 * against the full line rate.
		 */
		if (mbpi && total > mbpi)
			atomic_inc(&phba->cgn_driver_evt_cnt);
	}
	phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */

	/* Since total_bytes has already been zero'ed, its okay to unblock
	 * after max_bytes_per_interval is setup.
	 */
	if (atomic_xchg(&phba->cmf_bw_wait, 0))
		queue_work(phba->wq, &phba->unblock_request_work);

	/* SCSI IO is now unblocked */
	atomic_set(&phba->cmf_stop_io, 0);

skip:
	hrtimer_forward_now(timer,
			    ktime_set(0, timer_interval * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}
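
/* Illustrative numbers for the interval extrapolation above (values chosen
 * for the example, not the actual defines): if LPFC_CMF_INTERVAL were 500 ms
 * but the timer actually fired after ms = 450, and total = 45000000 bytes
 * were counted, then cnt = 45000000 / 450 = 100000 bytes/ms, cnt * 500 =
 * 50000000, and extra = 5000000 bytes is added so the CMF_SYNC_WQE reports
 * a full interval's worth of traffic.
 */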
#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx reported an error */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
	u8 cnt = 0;

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(phba,
						   bf_get(lpfc_acqe_fc_la_speed,
							  acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
		cnt++;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
		cnt++;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
		cnt++;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
		cnt++;
	}

	if (cnt)
		phba->trunk_link.phy_lnk_speed =
			phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
	else
		phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (phba->cmf_active_mode != LPFC_CFG_OFF)
		lpfc_cmf_signal_init(phba);

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI-4: We have only 0xA error codes
				 * defined as of now. print an appropriate
				 * message in case driver needs to be updated.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}
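
/* Note on the trunking decode above: port_fault (read from the trunk
 * linkmask field) is treated as a per-port bitmap (bit0 = port0 ... bit3 =
 * port3) and the single err code is applied only to ports whose bit is set.
 * Illustrative example: a port_fault value of 0x5 marks ports 0 and 2 as
 * faulted with code err while ports 1 and 3 keep a fault value of 0.
 */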
6223 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6224 * @phba: pointer to lpfc hba data structure.
6225 * @acqe_fc: pointer to the async fc completion queue entry.
6227 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6228 * that the event was received and then issue a read_topology mailbox command so
6229 * that the rest of the driver will treat it the same as SLI3.
6232 lpfc_sli4_async_fc_evt(struct lpfc_hba
*phba
, struct lpfc_acqe_fc_la
*acqe_fc
)
6236 struct lpfc_mbx_read_top
*la
;
6240 if (bf_get(lpfc_trailer_type
, acqe_fc
) !=
6241 LPFC_FC_LA_EVENT_TYPE_FC_LINK
) {
6242 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
6243 "2895 Non FC link Event detected.(%d)\n",
6244 bf_get(lpfc_trailer_type
, acqe_fc
));
6248 if (bf_get(lpfc_acqe_fc_la_att_type
, acqe_fc
) ==
6249 LPFC_FC_LA_TYPE_TRUNKING_EVENT
) {
6250 lpfc_update_trunk_link_status(phba
, acqe_fc
);
6254 /* Keep the link status for extra SLI4 state machine reference */
6255 phba
->sli4_hba
.link_state
.speed
=
6256 lpfc_sli4_port_speed_parse(phba
, LPFC_TRAILER_CODE_FC
,
6257 bf_get(lpfc_acqe_fc_la_speed
, acqe_fc
));
6258 phba
->sli4_hba
.link_state
.duplex
= LPFC_ASYNC_LINK_DUPLEX_FULL
;
6259 phba
->sli4_hba
.link_state
.topology
=
6260 bf_get(lpfc_acqe_fc_la_topology
, acqe_fc
);
6261 phba
->sli4_hba
.link_state
.status
=
6262 bf_get(lpfc_acqe_fc_la_att_type
, acqe_fc
);
6263 phba
->sli4_hba
.link_state
.type
=
6264 bf_get(lpfc_acqe_fc_la_port_type
, acqe_fc
);
6265 phba
->sli4_hba
.link_state
.number
=
6266 bf_get(lpfc_acqe_fc_la_port_number
, acqe_fc
);
6267 phba
->sli4_hba
.link_state
.fault
=
6268 bf_get(lpfc_acqe_link_fault
, acqe_fc
);
6269 phba
->sli4_hba
.link_state
.link_status
=
6270 bf_get(lpfc_acqe_fc_la_link_status
, acqe_fc
);
6273 * Only select attention types need logical speed modification to what
6274 * was previously set.
6276 if (phba
->sli4_hba
.link_state
.status
>= LPFC_FC_LA_TYPE_LINK_UP
&&
6277 phba
->sli4_hba
.link_state
.status
< LPFC_FC_LA_TYPE_ACTIVATE_FAIL
) {
6278 if (bf_get(lpfc_acqe_fc_la_att_type
, acqe_fc
) ==
6279 LPFC_FC_LA_TYPE_LINK_DOWN
)
6280 phba
->sli4_hba
.link_state
.logical_speed
= 0;
6281 else if (!phba
->sli4_hba
.conf_trunk
)
6282 phba
->sli4_hba
.link_state
.logical_speed
=
6283 bf_get(lpfc_acqe_fc_la_llink_spd
, acqe_fc
) * 10;
6286 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
6287 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6288 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6289 "%dMbps Fault:x%x Link Status:x%x\n",
6290 phba
->sli4_hba
.link_state
.speed
,
6291 phba
->sli4_hba
.link_state
.topology
,
6292 phba
->sli4_hba
.link_state
.status
,
6293 phba
->sli4_hba
.link_state
.type
,
6294 phba
->sli4_hba
.link_state
.number
,
6295 phba
->sli4_hba
.link_state
.logical_speed
,
6296 phba
->sli4_hba
.link_state
.fault
,
6297 phba
->sli4_hba
.link_state
.link_status
);
6300 * The following attention types are informational only, providing
6301 * further details about link status. Overwrite the value of
6302 * link_state.status appropriately. No further action is required.
6304 if (phba
->sli4_hba
.link_state
.status
>= LPFC_FC_LA_TYPE_ACTIVATE_FAIL
) {
6305 switch (phba
->sli4_hba
.link_state
.status
) {
6306 case LPFC_FC_LA_TYPE_ACTIVATE_FAIL
:
6307 log_level
= KERN_WARNING
;
6308 phba
->sli4_hba
.link_state
.status
=
6309 LPFC_FC_LA_TYPE_LINK_DOWN
;
6311 case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT
:
6313 * During bb credit recovery establishment, receiving
6314 * this attention type is normal. Link Up attention
6315 * type is expected to occur before this informational
6316 * attention type so keep the Link Up status.
6318 log_level
= KERN_INFO
;
6319 phba
->sli4_hba
.link_state
.status
=
6320 LPFC_FC_LA_TYPE_LINK_UP
;
6323 log_level
= KERN_INFO
;
6326 lpfc_log_msg(phba
, log_level
, LOG_SLI
,
6327 "2992 Async FC event - Informational Link "
6328 "Attention Type x%x\n",
6329 bf_get(lpfc_acqe_fc_la_att_type
, acqe_fc
));
6333 pmb
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
6335 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
6336 "2897 The mboxq allocation failed\n");
6339 rc
= lpfc_mbox_rsrc_prep(phba
, pmb
);
6341 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
6342 "2898 The mboxq prep failed\n");
6346 /* Cleanup any outstanding ELS commands */
6347 lpfc_els_flush_all_cmd(phba
);
6349 /* Block ELS IOCBs until we have done process link event */
6350 phba
->sli4_hba
.els_wq
->pring
->flag
|= LPFC_STOP_IOCB_EVENT
;
6352 /* Update link event statistics */
6353 phba
->sli
.slistat
.link_event
++;
6355 /* Create lpfc_handle_latt mailbox command from link ACQE */
6356 lpfc_read_topology(phba
, pmb
, pmb
->ctx_buf
);
6357 pmb
->mbox_cmpl
= lpfc_mbx_cmpl_read_topology
;
6358 pmb
->vport
= phba
->pport
;
6360 if (phba
->sli4_hba
.link_state
.status
!= LPFC_FC_LA_TYPE_LINK_UP
) {
6361 phba
->link_flag
&= ~(LS_MDS_LINK_DOWN
| LS_MDS_LOOPBACK
);
6363 switch (phba
->sli4_hba
.link_state
.status
) {
6364 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN
:
6365 phba
->link_flag
|= LS_MDS_LINK_DOWN
;
6367 case LPFC_FC_LA_TYPE_MDS_LOOPBACK
:
6368 phba
->link_flag
|= LS_MDS_LOOPBACK
;
6374 /* Initialize completion status */
6376 mb
->mbxStatus
= MBX_SUCCESS
;
6378 /* Parse port fault information field */
6379 lpfc_sli4_parse_latt_fault(phba
, (void *)acqe_fc
);
6381 /* Parse and translate link attention fields */
6382 la
= (struct lpfc_mbx_read_top
*)&pmb
->u
.mb
.un
.varReadTop
;
6383 la
->eventTag
= acqe_fc
->event_tag
;
6385 if (phba
->sli4_hba
.link_state
.status
==
6386 LPFC_FC_LA_TYPE_UNEXP_WWPN
) {
6387 bf_set(lpfc_mbx_read_top_att_type
, la
,
6388 LPFC_FC_LA_TYPE_UNEXP_WWPN
);
6390 bf_set(lpfc_mbx_read_top_att_type
, la
,
6391 LPFC_FC_LA_TYPE_LINK_DOWN
);
6393 /* Invoke the mailbox command callback function */
6394 lpfc_mbx_cmpl_read_topology(phba
, pmb
);
6399 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
6400 if (rc
== MBX_NOT_FINISHED
)
6405 lpfc_mbox_rsrc_cleanup(phba
, pmb
, MBOX_THD_UNLOCKED
);
6409 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6410 * @phba: pointer to lpfc hba data structure.
6411 * @acqe_sli: pointer to the async SLI completion queue entry.
6413 * This routine is to handle the SLI4 asynchronous SLI events.
6416 lpfc_sli4_async_sli_evt(struct lpfc_hba
*phba
, struct lpfc_acqe_sli
*acqe_sli
)
6422 uint8_t operational
= 0;
6423 struct temp_event temp_event_data
;
6424 struct lpfc_acqe_misconfigured_event
*misconfigured
;
6425 struct lpfc_acqe_cgn_signal
*cgn_signal
;
6426 struct Scsi_Host
*shost
;
6427 struct lpfc_vport
**vports
;
6430 evt_type
= bf_get(lpfc_trailer_type
, acqe_sli
);
6432 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
6433 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6434 "x%08x x%08x x%08x\n", evt_type
,
6435 acqe_sli
->event_data1
, acqe_sli
->event_data2
,
6436 acqe_sli
->event_data3
, acqe_sli
->trailer
);
6438 port_name
= phba
->Port
[0];
6439 if (port_name
== 0x00)
6440 port_name
= '?'; /* get port name is empty */
6443 case LPFC_SLI_EVENT_TYPE_OVER_TEMP
:
6444 temp_event_data
.event_type
= FC_REG_TEMPERATURE_EVENT
;
6445 temp_event_data
.event_code
= LPFC_THRESHOLD_TEMP
;
6446 temp_event_data
.data
= (uint32_t)acqe_sli
->event_data1
;
6448 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
6449 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6450 acqe_sli
->event_data1
, port_name
);
6452 phba
->sfp_warning
|= LPFC_TRANSGRESSION_HIGH_TEMPERATURE
;
6453 shost
= lpfc_shost_from_vport(phba
->pport
);
6454 fc_host_post_vendor_event(shost
, fc_get_event_number(),
6455 sizeof(temp_event_data
),
6456 (char *)&temp_event_data
,
6457 SCSI_NL_VID_TYPE_PCI
6458 | PCI_VENDOR_ID_EMULEX
);
6460 case LPFC_SLI_EVENT_TYPE_NORM_TEMP
:
6461 temp_event_data
.event_type
= FC_REG_TEMPERATURE_EVENT
;
6462 temp_event_data
.event_code
= LPFC_NORMAL_TEMP
;
6463 temp_event_data
.data
= (uint32_t)acqe_sli
->event_data1
;
6465 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
| LOG_LDS_EVENT
,
6466 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6467 acqe_sli
->event_data1
, port_name
);
6469 shost
= lpfc_shost_from_vport(phba
->pport
);
6470 fc_host_post_vendor_event(shost
, fc_get_event_number(),
6471 sizeof(temp_event_data
),
6472 (char *)&temp_event_data
,
6473 SCSI_NL_VID_TYPE_PCI
6474 | PCI_VENDOR_ID_EMULEX
);
6476 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED
:
6477 misconfigured
= (struct lpfc_acqe_misconfigured_event
*)
6478 &acqe_sli
->event_data1
;
6480 /* fetch the status for this port */
6481 switch (phba
->sli4_hba
.lnk_info
.lnk_no
) {
6482 case LPFC_LINK_NUMBER_0
:
6483 status
= bf_get(lpfc_sli_misconfigured_port0_state
,
6484 &misconfigured
->theEvent
);
6485 operational
= bf_get(lpfc_sli_misconfigured_port0_op
,
6486 &misconfigured
->theEvent
);
6488 case LPFC_LINK_NUMBER_1
:
6489 status
= bf_get(lpfc_sli_misconfigured_port1_state
,
6490 &misconfigured
->theEvent
);
6491 operational
= bf_get(lpfc_sli_misconfigured_port1_op
,
6492 &misconfigured
->theEvent
);
6494 case LPFC_LINK_NUMBER_2
:
6495 status
= bf_get(lpfc_sli_misconfigured_port2_state
,
6496 &misconfigured
->theEvent
);
6497 operational
= bf_get(lpfc_sli_misconfigured_port2_op
,
6498 &misconfigured
->theEvent
);
6500 case LPFC_LINK_NUMBER_3
:
6501 status
= bf_get(lpfc_sli_misconfigured_port3_state
,
6502 &misconfigured
->theEvent
);
6503 operational
= bf_get(lpfc_sli_misconfigured_port3_op
,
6504 &misconfigured
->theEvent
);
6507 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
6509 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6510 "event: Invalid link %d",
6511 phba
->sli4_hba
.lnk_info
.lnk_no
);
6515 /* Skip if optic state unchanged */
6516 if (phba
->sli4_hba
.lnk_info
.optic_state
== status
)
6520 case LPFC_SLI_EVENT_STATUS_VALID
:
6521 sprintf(message
, "Physical Link is functional");
6523 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT
:
6524 sprintf(message
, "Optics faulted/incorrectly "
6525 "installed/not installed - Reseat optics, "
6526 "if issue not resolved, replace.");
6528 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE
:
6530 "Optics of two types installed - Remove one "
6531 "optic or install matching pair of optics.");
6533 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED
:
6534 sprintf(message
, "Incompatible optics - Replace with "
6535 "compatible optics for card to function.");
6537 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED
:
6538 sprintf(message
, "Unqualified optics - Replace with "
6539 "Avago optics for Warranty and Technical "
6540 "Support - Link is%s operational",
6541 (operational
) ? " not" : "");
6543 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED
:
6544 sprintf(message
, "Uncertified optics - Replace with "
6545 "Avago-certified optics to enable link "
6546 "operation - Link is%s operational",
6547 (operational
) ? " not" : "");
6550 /* firmware is reporting a status we don't know about */
6551 sprintf(message
, "Unknown event status x%02x", status
);
6555 /* Issue READ_CONFIG mbox command to refresh supported speeds */
6556 rc
= lpfc_sli4_read_config(phba
);
6559 lpfc_printf_log(phba
, KERN_ERR
,
6561 "3194 Unable to retrieve supported "
6562 "speeds, rc = 0x%x\n", rc
);
6564 rc
= lpfc_sli4_refresh_params(phba
);
6566 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
6567 "3174 Unable to update pls support, "
6570 vports
= lpfc_create_vport_work_array(phba
);
6571 if (vports
!= NULL
) {
6572 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
;
6574 shost
= lpfc_shost_from_vport(vports
[i
]);
6575 lpfc_host_supported_speeds_set(shost
);
6578 lpfc_destroy_vport_work_array(phba
, vports
);
6580 phba
->sli4_hba
.lnk_info
.optic_state
= status
;
6581 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
6582 "3176 Port Name %c %s\n", port_name
, message
);
6584 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT
:
6585 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
6586 "3192 Remote DPort Test Initiated - "
6587 "Event Data1:x%08x Event Data2: x%08x\n",
6588 acqe_sli
->event_data1
, acqe_sli
->event_data2
);
6590 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG
:
6591 /* Call FW to obtain active parms */
6592 lpfc_sli4_cgn_parm_chg_evt(phba
);
6594 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN
:
6595 /* Misconfigured WWN. Reports that the SLI Port is configured
6596 * to use FA-WWN, but the attached device doesn’t support it.
6597 * Event Data1 - N.A, Event Data2 - N.A
6598 * This event only happens on the physical port.
6600 lpfc_log_msg(phba
, KERN_WARNING
, LOG_SLI
| LOG_DISCOVERY
,
6601 "2699 Misconfigured FA-PWWN - Attached device "
6602 "does not support FA-PWWN\n");
6603 phba
->sli4_hba
.fawwpn_flag
&= ~LPFC_FAWWPN_FABRIC
;
6604 memset(phba
->pport
->fc_portname
.u
.wwn
, 0,
6605 sizeof(struct lpfc_name
));
6607 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE
:
6608 /* EEPROM failure. No driver action is required */
6609 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
6610 "2518 EEPROM failure - "
6611 "Event Data1: x%08x Event Data2: x%08x\n",
6612 acqe_sli
->event_data1
, acqe_sli
->event_data2
);
6614 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL
:
6615 if (phba
->cmf_active_mode
== LPFC_CFG_OFF
)
6617 cgn_signal
= (struct lpfc_acqe_cgn_signal
*)
6618 &acqe_sli
->event_data1
;
6619 phba
->cgn_acqe_cnt
++;
6621 cnt
= bf_get(lpfc_warn_acqe
, cgn_signal
);
6622 atomic64_add(cnt
, &phba
->cgn_acqe_stat
.warn
);
6623 atomic64_add(cgn_signal
->alarm_cnt
, &phba
->cgn_acqe_stat
.alarm
);
6625 /* no threshold for CMF, even 1 signal will trigger an event */
6627 /* Alarm overrides warning, so check that first */
6628 if (cgn_signal
->alarm_cnt
) {
6629 if (phba
->cgn_reg_signal
== EDC_CG_SIG_WARN_ALARM
) {
6630 /* Keep track of alarm cnt for CMF_SYNC_WQE */
6631 atomic_add(cgn_signal
->alarm_cnt
,
6632 &phba
->cgn_sync_alarm_cnt
);
6635 /* signal action needs to be taken */
6636 if (phba
->cgn_reg_signal
== EDC_CG_SIG_WARN_ONLY
||
6637 phba
->cgn_reg_signal
== EDC_CG_SIG_WARN_ALARM
) {
6638 /* Keep track of warning cnt for CMF_SYNC_WQE */
6639 atomic_add(cnt
, &phba
->cgn_sync_warn_cnt
);
6643 case LPFC_SLI_EVENT_TYPE_RD_SIGNAL
:
6644 /* May be accompanied by a temperature event */
6645 lpfc_printf_log(phba
, KERN_INFO
,
6646 LOG_SLI
| LOG_LINK_EVENT
| LOG_LDS_EVENT
,
6647 "2902 Remote Degrade Signaling: x%08x x%08x "
6649 acqe_sli
->event_data1
, acqe_sli
->event_data2
,
6650 acqe_sli
->event_data3
);
6652 case LPFC_SLI_EVENT_TYPE_RESET_CM_STATS
:
6653 lpfc_printf_log(phba
, KERN_INFO
, LOG_CGN_MGMT
,
6654 "2905 Reset CM statistics\n");
6655 lpfc_sli4_async_cmstat_evt(phba
);
6658 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
6659 "3193 Unrecognized SLI event, type: 0x%x",
/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	set_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);

	return ndlp;
}
/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
6736 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6737 * @phba: pointer to lpfc hba data structure.
6738 * @acqe_fip: pointer to the async fcoe completion queue entry.
6740 * This routine is to handle the SLI4 asynchronous fcoe event.
6743 lpfc_sli4_async_fip_evt(struct lpfc_hba
*phba
,
6744 struct lpfc_acqe_fip
*acqe_fip
)
6746 uint8_t event_type
= bf_get(lpfc_trailer_type
, acqe_fip
);
6748 struct lpfc_vport
*vport
;
6749 struct lpfc_nodelist
*ndlp
;
6750 int active_vlink_present
;
6751 struct lpfc_vport
**vports
;
6754 phba
->fc_eventTag
= acqe_fip
->event_tag
;
6755 phba
->fcoe_eventtag
= acqe_fip
->event_tag
;
6756 switch (event_type
) {
6757 case LPFC_FIP_EVENT_TYPE_NEW_FCF
:
6758 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD
:
6759 if (event_type
== LPFC_FIP_EVENT_TYPE_NEW_FCF
)
6760 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
6761 "2546 New FCF event, evt_tag:x%x, "
6763 acqe_fip
->event_tag
,
6766 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
|
6768 "2788 FCF param modified event, "
6769 "evt_tag:x%x, index:x%x\n",
6770 acqe_fip
->event_tag
,
6772 if (phba
->fcf
.fcf_flag
& FCF_DISCOVERY
) {
6774 * During period of FCF discovery, read the FCF
6775 * table record indexed by the event to update
6776 * FCF roundrobin failover eligible FCF bmask.
6778 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
|
6780 "2779 Read FCF (x%x) for updating "
6781 "roundrobin FCF failover bmask\n",
6783 rc
= lpfc_sli4_read_fcf_rec(phba
, acqe_fip
->index
);
6786 /* If the FCF discovery is in progress, do nothing. */
6787 if (test_bit(FCF_TS_INPROG
, &phba
->hba_flag
))
6789 spin_lock_irq(&phba
->hbalock
);
6790 /* If fast FCF failover rescan event is pending, do nothing */
6791 if (phba
->fcf
.fcf_flag
& (FCF_REDISC_EVT
| FCF_REDISC_PEND
)) {
6792 spin_unlock_irq(&phba
->hbalock
);
6796 /* If the FCF has been in discovered state, do nothing. */
6797 if (phba
->fcf
.fcf_flag
& FCF_SCAN_DONE
) {
6798 spin_unlock_irq(&phba
->hbalock
);
6801 spin_unlock_irq(&phba
->hbalock
);
6803 /* Otherwise, scan the entire FCF table and re-discover SAN */
6804 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
| LOG_DISCOVERY
,
6805 "2770 Start FCF table scan per async FCF "
6806 "event, evt_tag:x%x, index:x%x\n",
6807 acqe_fip
->event_tag
, acqe_fip
->index
);
6808 rc
= lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
6809 LPFC_FCOE_FCF_GET_FIRST
);
6811 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
6812 "2547 Issue FCF scan read FCF mailbox "
6813 "command failed (x%x)\n", rc
);
6816 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL
:
6817 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
6818 "2548 FCF Table full count 0x%x tag 0x%x\n",
6819 bf_get(lpfc_acqe_fip_fcf_count
, acqe_fip
),
6820 acqe_fip
->event_tag
);
6823 case LPFC_FIP_EVENT_TYPE_FCF_DEAD
:
6824 phba
->fcoe_cvl_eventtag
= acqe_fip
->event_tag
;
6825 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
6826 "2549 FCF (x%x) disconnected from network, "
6827 "tag:x%x\n", acqe_fip
->index
,
6828 acqe_fip
->event_tag
);
6830 * If we are in the middle of FCF failover process, clear
6831 * the corresponding FCF bit in the roundrobin bitmap.
6833 spin_lock_irq(&phba
->hbalock
);
6834 if ((phba
->fcf
.fcf_flag
& FCF_DISCOVERY
) &&
6835 (phba
->fcf
.current_rec
.fcf_indx
!= acqe_fip
->index
)) {
6836 spin_unlock_irq(&phba
->hbalock
);
6837 /* Update FLOGI FCF failover eligible FCF bmask */
6838 lpfc_sli4_fcf_rr_index_clear(phba
, acqe_fip
->index
);
6841 spin_unlock_irq(&phba
->hbalock
);
6843 /* If the event is not for currently used fcf do nothing */
6844 if (phba
->fcf
.current_rec
.fcf_indx
!= acqe_fip
->index
)
6848 * Otherwise, request the port to rediscover the entire FCF
6849 * table for a fast recovery from case that the current FCF
6850 * is no longer valid as we are not in the middle of FCF
6851 * failover process already.
6853 spin_lock_irq(&phba
->hbalock
);
6854 /* Mark the fast failover process in progress */
6855 phba
->fcf
.fcf_flag
|= FCF_DEAD_DISC
;
6856 spin_unlock_irq(&phba
->hbalock
);
6858 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
| LOG_DISCOVERY
,
6859 "2771 Start FCF fast failover process due to "
6860 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6861 "\n", acqe_fip
->event_tag
, acqe_fip
->index
);
6862 rc
= lpfc_sli4_redisc_fcf_table(phba
);
6864 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
|
6866 "2772 Issue FCF rediscover mailbox "
6867 "command failed, fail through to FCF "
6869 spin_lock_irq(&phba
->hbalock
);
6870 phba
->fcf
.fcf_flag
&= ~FCF_DEAD_DISC
;
6871 spin_unlock_irq(&phba
->hbalock
);
6873 * Last resort will fail over by treating this
6874 * as a link down to FCF registration.
6876 lpfc_sli4_fcf_dead_failthrough(phba
);
6878 /* Reset FCF roundrobin bmask for new discovery */
6879 lpfc_sli4_clear_fcf_rr_bmask(phba
);
6881 * Handling fast FCF failover to a DEAD FCF event is
6882 * considered equalivant to receiving CVL to all vports.
6884 lpfc_sli4_perform_all_vport_cvl(phba
);
6887 case LPFC_FIP_EVENT_TYPE_CVL
:
6888 phba
->fcoe_cvl_eventtag
= acqe_fip
->event_tag
;
6889 lpfc_printf_log(phba
, KERN_ERR
,
6891 "2718 Clear Virtual Link Received for VPI 0x%x"
6892 " tag 0x%x\n", acqe_fip
->index
, acqe_fip
->event_tag
);
6894 vport
= lpfc_find_vport_by_vpid(phba
,
6896 ndlp
= lpfc_sli4_perform_vport_cvl(vport
);
6899 active_vlink_present
= 0;
6901 vports
= lpfc_create_vport_work_array(phba
);
6903 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
;
6905 if (!test_bit(FC_VPORT_CVL_RCVD
,
6906 &vports
[i
]->fc_flag
) &&
6907 vports
[i
]->port_state
> LPFC_FDISC
) {
6908 active_vlink_present
= 1;
6912 lpfc_destroy_vport_work_array(phba
, vports
);
6916 * Don't re-instantiate if vport is marked for deletion.
6917 * If we are here first then vport_delete is going to wait
6918 * for discovery to complete.
6920 if (!test_bit(FC_UNLOADING
, &vport
->load_flag
) &&
6921 active_vlink_present
) {
6923 * If there are other active VLinks present,
6924 * re-instantiate the Vlink using FDISC.
6926 mod_timer(&ndlp
->nlp_delayfunc
,
6927 jiffies
+ msecs_to_jiffies(1000));
6928 spin_lock_irq(&ndlp
->lock
);
6929 ndlp
->nlp_flag
|= NLP_DELAY_TMO
;
6930 spin_unlock_irq(&ndlp
->lock
);
6931 ndlp
->nlp_last_elscmd
= ELS_CMD_FDISC
;
6932 vport
->port_state
= LPFC_FDISC
;
6935 * Otherwise, we request port to rediscover
6936 * the entire FCF table for a fast recovery
6937 * from possible case that the current FCF
6938 * is no longer valid if we are not already
6939 * in the FCF failover process.
6941 spin_lock_irq(&phba
->hbalock
);
6942 if (phba
->fcf
.fcf_flag
& FCF_DISCOVERY
) {
6943 spin_unlock_irq(&phba
->hbalock
);
6946 /* Mark the fast failover process in progress */
6947 phba
->fcf
.fcf_flag
|= FCF_ACVL_DISC
;
6948 spin_unlock_irq(&phba
->hbalock
);
6949 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
|
6951 "2773 Start FCF failover per CVL, "
6952 "evt_tag:x%x\n", acqe_fip
->event_tag
);
6953 rc
= lpfc_sli4_redisc_fcf_table(phba
);
6955 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
|
6957 "2774 Issue FCF rediscover "
6958 "mailbox command failed, "
6959 "through to CVL event\n");
6960 spin_lock_irq(&phba
->hbalock
);
6961 phba
->fcf
.fcf_flag
&= ~FCF_ACVL_DISC
;
6962 spin_unlock_irq(&phba
->hbalock
);
6964 * Last resort will be re-try on the
6965 * the current registered FCF entry.
6967 lpfc_retry_pport_discovery(phba
);
6970 * Reset FCF roundrobin bmask for new
6973 lpfc_sli4_clear_fcf_rr_bmask(phba
);
6977 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
6978 "0288 Unknown FCoE event type 0x%x event tag "
6979 "0x%x\n", event_type
, acqe_fip
->event_tag
);
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notified of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}
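
/* Example of the unit conversion above: the GRP5 ACQE reports logical link
 * speed in 10 Mbps units, so a raw field value of 1000 is stored as
 * 1000 * 10 = 10000 Mbps (10 Gbps) in link_state.logical_speed.
 */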
/**
 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
 * is an asynchronous notification of a request to reset CM stats.
 **/
static void
lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
{
	if (!phba->cgn_i)
		return;
	lpfc_init_congestion_stat(phba);
}
/**
 * lpfc_cgn_params_val - Validate FW congestion parameters.
 * @phba: pointer to lpfc hba data structure.
 * @p_cfg_param: pointer to FW provided congestion parameters.
 *
 * This routine validates the congestion parameters passed
 * by the FW to the driver via an ACQE event.
 **/
static void
lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
{
	spin_lock_irq(&phba->hbalock);

	if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
			     LPFC_CFG_MONITOR)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
				"6225 CMF mode param out of range: %d\n",
				p_cfg_param->cgn_param_mode);
		p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
	}

	spin_unlock_irq(&phba->hbalock);
}

static const char * const lpfc_cmf_mode_to_str[] = {
	"OFF",
	"MANAGED",
	"MONITOR",
};
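
/* Assuming the usual OFF/MANAGED/MONITOR encoding, the strings above index
 * by the same values lpfc_cgn_params_val() range checks (LPFC_CFG_OFF
 * through LPFC_CFG_MONITOR). Any FW supplied mode outside that range is
 * clamped to LPFC_CFG_OFF before it is copied into phba->cgn_p, so the
 * table lookup in the parse path cannot run off the end of the array.
 */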
7073 * lpfc_cgn_params_parse - Process a FW cong parm change event
7074 * @phba: pointer to lpfc hba data structure.
7075 * @p_cgn_param: pointer to a data buffer with the FW cong params.
7076 * @len: the size of pdata in bytes.
7078 * This routine validates the congestion management buffer signature
7079 * from the FW, validates the contents and makes corrections for
7080 * valid, in-range values. If the signature magic is correct and
7081 * after parameter validation, the contents are copied to the driver's
7082 * @phba structure. If the magic is incorrect, an error message is
7086 lpfc_cgn_params_parse(struct lpfc_hba
*phba
,
7087 struct lpfc_cgn_param
*p_cgn_param
, uint32_t len
)
7089 struct lpfc_cgn_info
*cp
;
7090 uint32_t crc
, oldmode
;
7091 char acr_string
[4] = {0};
7093 /* Make sure the FW has encoded the correct magic number to
7094 * validate the congestion parameter in FW memory.
7096 if (p_cgn_param
->cgn_param_magic
== LPFC_CFG_PARAM_MAGIC_NUM
) {
7097 lpfc_printf_log(phba
, KERN_INFO
, LOG_CGN_MGMT
| LOG_INIT
,
7098 "4668 FW cgn parm buffer data: "
7099 "magic 0x%x version %d mode %d "
7100 "level0 %d level1 %d "
7101 "level2 %d byte13 %d "
7102 "byte14 %d byte15 %d "
7103 "byte11 %d byte12 %d activeMode %d\n",
7104 p_cgn_param
->cgn_param_magic
,
7105 p_cgn_param
->cgn_param_version
,
7106 p_cgn_param
->cgn_param_mode
,
7107 p_cgn_param
->cgn_param_level0
,
7108 p_cgn_param
->cgn_param_level1
,
7109 p_cgn_param
->cgn_param_level2
,
7110 p_cgn_param
->byte13
,
7111 p_cgn_param
->byte14
,
7112 p_cgn_param
->byte15
,
7113 p_cgn_param
->byte11
,
7114 p_cgn_param
->byte12
,
7115 phba
->cmf_active_mode
);
7117 oldmode
= phba
->cmf_active_mode
;
7119 /* Any parameters out of range are corrected to defaults
7120 * by this routine. No need to fail.
7122 lpfc_cgn_params_val(phba
, p_cgn_param
);
7124 /* Parameters are verified, move them into driver storage */
7125 spin_lock_irq(&phba
->hbalock
);
7126 memcpy(&phba
->cgn_p
, p_cgn_param
,
7127 sizeof(struct lpfc_cgn_param
));
7129 /* Update parameters in congestion info buffer now */
7131 cp
= (struct lpfc_cgn_info
*)phba
->cgn_i
->virt
;
7132 cp
->cgn_info_mode
= phba
->cgn_p
.cgn_param_mode
;
7133 cp
->cgn_info_level0
= phba
->cgn_p
.cgn_param_level0
;
7134 cp
->cgn_info_level1
= phba
->cgn_p
.cgn_param_level1
;
7135 cp
->cgn_info_level2
= phba
->cgn_p
.cgn_param_level2
;
7136 crc
= lpfc_cgn_calc_crc32(cp
, LPFC_CGN_INFO_SZ
,
7137 LPFC_CGN_CRC32_SEED
);
7138 cp
->cgn_info_crc
= cpu_to_le32(crc
);
7140 spin_unlock_irq(&phba
->hbalock
);
7142 phba
->cmf_active_mode
= phba
->cgn_p
.cgn_param_mode
;
7146 if (phba
->cgn_p
.cgn_param_mode
!= LPFC_CFG_OFF
) {
7147 /* Turning CMF on */
7148 lpfc_cmf_start(phba
);
7150 if (phba
->link_state
>= LPFC_LINK_UP
) {
7151 phba
->cgn_reg_fpin
=
7152 phba
->cgn_init_reg_fpin
;
7153 phba
->cgn_reg_signal
=
7154 phba
->cgn_init_reg_signal
;
7155 lpfc_issue_els_edc(phba
->pport
, 0);
7159 case LPFC_CFG_MANAGED
:
7160 switch (phba
->cgn_p
.cgn_param_mode
) {
7162 /* Turning CMF off */
7163 lpfc_cmf_stop(phba
);
7164 if (phba
->link_state
>= LPFC_LINK_UP
)
7165 lpfc_issue_els_edc(phba
->pport
, 0);
7167 case LPFC_CFG_MONITOR
:
7168 phba
->cmf_max_bytes_per_interval
=
7169 phba
->cmf_link_byte_count
;
7171 /* Resume blocked IO - unblock on workqueue */
7172 queue_work(phba
->wq
,
7173 &phba
->unblock_request_work
);
7177 case LPFC_CFG_MONITOR
:
7178 switch (phba
->cgn_p
.cgn_param_mode
) {
7180 /* Turning CMF off */
7181 lpfc_cmf_stop(phba
);
7182 if (phba
->link_state
>= LPFC_LINK_UP
)
7183 lpfc_issue_els_edc(phba
->pport
, 0);
7185 case LPFC_CFG_MANAGED
:
7186 lpfc_cmf_signal_init(phba
);
7191 if (oldmode
!= LPFC_CFG_OFF
||
7192 oldmode
!= phba
->cgn_p
.cgn_param_mode
) {
7193 if (phba
->cgn_p
.cgn_param_mode
== LPFC_CFG_MANAGED
)
7194 scnprintf(acr_string
, sizeof(acr_string
), "%u",
7195 phba
->cgn_p
.cgn_param_level0
);
7197 scnprintf(acr_string
, sizeof(acr_string
), "NA");
7199 dev_info(&phba
->pcidev
->dev
, "%d: "
7200 "4663 CMF: Mode %s acr %s\n",
7202 lpfc_cmf_mode_to_str
7203 [phba
->cgn_p
.cgn_param_mode
],
7207 lpfc_printf_log(phba
, KERN_ERR
, LOG_CGN_MGMT
| LOG_INIT
,
7208 "4669 FW cgn parm buf wrong magic 0x%x "
7209 "version %d\n", p_cgn_param
->cgn_param_magic
,
7210 p_cgn_param
->cgn_param_version
);
/**
 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a read_object mailbox command to
 * get the congestion management parameters from the FW
 * parses it and updates the driver maintained values.
 *
 * Returns
 *  0     if the object was empty
 *  -Eval if an error was encountered
 *  Count if bytes were read from object
 **/
static int
lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
{
	int ret = 0;
	struct lpfc_cgn_param *p_cgn_param = NULL;
	u32 *pdata = NULL;
	u32 len = 0;

	/* Find out if the FW has a new set of congestion parameters. */
	len = sizeof(struct lpfc_cgn_param);
	pdata = kzalloc(len, GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
			       pdata, len);

	/* 0 means no data. A negative means error. A positive means
	 * bytes were copied.
	 */
	if (!ret) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
				"4670 CGN RD OBJ returns no data\n");
		goto rd_obj_err;
	} else if (ret < 0) {
		/* Some error. Just exit and return it to the caller.*/
		goto rd_obj_err;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
			"6234 READ CGN PARAMS Successful %d\n", len);

	/* Parse data pointer over len and update the phba congestion
	 * parameters with values passed back. The receive rate values
	 * may have been altered in FW, but take no action here.
	 */
	p_cgn_param = (struct lpfc_cgn_param *)pdata;
	lpfc_cgn_params_parse(phba, p_cgn_param, len);

 rd_obj_err:
	kfree(pdata);
	return ret;
}
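
/* Hedged usage sketch for the return convention above (this mirrors the
 * checks made by lpfc_sli4_cgn_parm_chg_evt() below):
 *
 *	ret = lpfc_sli4_cgn_params_read(phba);
 *	if (ret < 0)
 *		... read_object failed, propagate the error ...
 *	else if (ret == 0)
 *		... object exists but is empty, nothing to parse ...
 *	else
 *		... 'ret' bytes were read and handed to the parser ...
 */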
/**
 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
 * @phba: pointer to lpfc hba data structure.
 *
 * The FW generated Async ACQE SLI event calls this routine when
 * the event type is an SLI Internal Port Event and the Event Code
 * indicates a change to the FW maintained congestion parameters.
 *
 * This routine executes a Read_Object mailbox call to obtain the
 * current congestion parameters maintained in FW and corrects
 * the driver's active congestion parameters.
 *
 * The acqe event is not passed because there is no further data
 * required.
 *
 * Returns nonzero error if event processing encountered an error.
 * Zero otherwise for success.
 **/
static int
lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
{
	int ret = 0;

	if (!phba->sli4_hba.pc_sli4_params.cmf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
				"4664 Cgn Evt when E2E off. Drop event\n");
		return -EACCES;
	}

	/* If the event is claiming an empty object, it's ok. A write
	 * could have cleared it. Only error is a negative return
	 * status.
	 */
	ret = lpfc_sli4_cgn_params_read(phba);
	if (ret < 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
				"4667 Error reading Cgn Params (%d)\n",
				ret);
	} else if (!ret) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
				"4673 CGN Event empty object.\n");
	}
	return ret;
}
/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the async event has been handled */
	clear_bit(ASYNC_EVENT, &phba->hba_flag);

	/* Now, handle all the async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
				       iflags);

		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}

		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}
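
/* Dispatch summary for the worker-thread loop above: the MCQE trailer code
 * selects the handler, e.g. LPFC_TRAILER_CODE_LINK -> link event,
 * LPFC_TRAILER_CODE_FCOE -> FIP event, LPFC_TRAILER_CODE_FC -> FC link
 * event, LPFC_TRAILER_CODE_SLI -> SLI port event. The list lock is dropped
 * while each event is handled, so the individual handlers are free to sleep
 * or issue mailbox commands.
 */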
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}
/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}
/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}
/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}
/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}
/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	int rc = 0;

	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}

	/* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	} else {
		if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
			/* Perform a PCI function reset to start from clean */
			rc = lpfc_pci_function_reset(phba);
			lpfc_els_flush_all_cmd(phba);
		}
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_sli_flush_io_rings(phba);
	}
	lpfc_offline(phba);
	clear_bit(MBX_TMO_ERR, &phba->bit_flags);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"8888 PCI function reset failed rc %x\n",
				rc);
	} else {
		lpfc_sli_brdrestart(phba);
		lpfc_online(phba);
		lpfc_unblock_mgmt_io(phba);
	}
}
/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the maximum number of SR-IOV virtual functions
 * (TotalVFs) that the physical function supports from its PCIe SR-IOV
 * extended capability. If the device exposes no SR-IOV capability, 0 is
 * returned.
 **/
static uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}
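
/* Illustrative example for the capability read above: if the SR-IOV
 * extended capability is present and the PCI_SRIOV_TOTAL_VF config word
 * reads back 16, this routine returns 16 and the caller below rejects any
 * request to enable more than 16 virtual functions.
 */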
/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most of the devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}
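
/* Typical flow (hedged sketch; the module parameter name is assumed from
 * the lpfc attribute naming convention): the requested VF count originates
 * from the lpfc_sriov_nr_virtfn module parameter, stored in
 * cfg_sriov_nr_virtfn by lpfc_get_cfgparam(). The SLI-3 resource setup
 * below passes it here and zeroes the config value again if this routine
 * returns an error, so an unsupported request simply falls back to running
 * without virtual functions.
 */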
static void
lpfc_unblock_requests_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
					     unblock_request_work);

	lpfc_unblock_requests(phba);
}
7669 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
7670 * @phba: pointer to lpfc hba data structure.
7672 * This routine is invoked to set up the driver internal resources before the
7673 * device specific resource setup to support the HBA device it attached to.
7677 * other values - error
7680 lpfc_setup_driver_resource_phase1(struct lpfc_hba
*phba
)
7682 struct lpfc_sli
*psli
= &phba
->sli
;
7685 * Driver resources common to all SLI revisions
7687 atomic_set(&phba
->fast_event_count
, 0);
7688 atomic_set(&phba
->dbg_log_idx
, 0);
7689 atomic_set(&phba
->dbg_log_cnt
, 0);
7690 atomic_set(&phba
->dbg_log_dmping
, 0);
7691 spin_lock_init(&phba
->hbalock
);
7693 /* Initialize port_list spinlock */
7694 spin_lock_init(&phba
->port_list_lock
);
7695 INIT_LIST_HEAD(&phba
->port_list
);
7697 INIT_LIST_HEAD(&phba
->work_list
);
7699 /* Initialize the wait queue head for the kernel thread */
7700 init_waitqueue_head(&phba
->work_waitq
);
7702 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
7703 "1403 Protocols supported %s %s %s\n",
7704 ((phba
->cfg_enable_fc4_type
& LPFC_ENABLE_FCP
) ?
7706 ((phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) ?
7708 (phba
->nvmet_support
? "NVMET" : " "));
7710 /* ras_fwlog state */
7711 spin_lock_init(&phba
->ras_fwlog_lock
);
7713 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7714 spin_lock_init(&phba
->scsi_buf_list_get_lock
);
7715 INIT_LIST_HEAD(&phba
->lpfc_scsi_buf_list_get
);
7716 spin_lock_init(&phba
->scsi_buf_list_put_lock
);
7717 INIT_LIST_HEAD(&phba
->lpfc_scsi_buf_list_put
);
7719 /* Initialize the fabric iocb list */
7720 INIT_LIST_HEAD(&phba
->fabric_iocb_list
);
7722 /* Initialize list to save ELS buffers */
7723 INIT_LIST_HEAD(&phba
->elsbuf
);
7725 /* Initialize FCF connection rec list */
7726 INIT_LIST_HEAD(&phba
->fcf_conn_rec_list
);
7728 /* Initialize OAS configuration list */
7729 spin_lock_init(&phba
->devicelock
);
7730 INIT_LIST_HEAD(&phba
->luns
);
7732 /* MBOX heartbeat timer */
7733 timer_setup(&psli
->mbox_tmo
, lpfc_mbox_timeout
, 0);
7734 /* Fabric block timer */
7735 timer_setup(&phba
->fabric_block_timer
, lpfc_fabric_block_timeout
, 0);
7736 /* EA polling mode timer */
7737 timer_setup(&phba
->eratt_poll
, lpfc_poll_eratt
, 0);
7738 /* Heartbeat timer */
7739 timer_setup(&phba
->hb_tmofunc
, lpfc_hb_timeout
, 0);
7741 INIT_DELAYED_WORK(&phba
->eq_delay_work
, lpfc_hb_eq_delay_work
);
7743 INIT_DELAYED_WORK(&phba
->idle_stat_delay_work
,
7744 lpfc_idle_stat_delay_work
);
7745 INIT_WORK(&phba
->unblock_request_work
, lpfc_unblock_requests_work
);
7750 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7751 * @phba: pointer to lpfc hba data structure.
7753 * This routine is invoked to set up the driver internal resources specific to
7754 * support the SLI-3 HBA device it attached to.
7758 * other values - error
7761 lpfc_sli_driver_resource_setup(struct lpfc_hba
*phba
)
7766 * Initialize timers used by driver
7769 /* FCP polling mode timer */
7770 timer_setup(&phba
->fcp_poll_timer
, lpfc_poll_timeout
, 0);
7772 /* Host attention work mask setup */
7773 phba
->work_ha_mask
= (HA_ERATT
| HA_MBATT
| HA_LATT
);
7774 phba
->work_ha_mask
|= (HA_RXMASK
<< (LPFC_ELS_RING
* 4));
7776 /* Get all the module params for configuring this host */
7777 lpfc_get_cfgparam(phba
);
7778 /* Set up phase-1 common device driver resources */
7780 rc
= lpfc_setup_driver_resource_phase1(phba
);
7784 if (!phba
->sli
.sli3_ring
)
7785 phba
->sli
.sli3_ring
= kcalloc(LPFC_SLI3_MAX_RING
,
7786 sizeof(struct lpfc_sli_ring
),
7788 if (!phba
->sli
.sli3_ring
)
7792 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
7793 * used to create the sg_dma_buf_pool must be dynamically calculated.
7796 if (phba
->sli_rev
== LPFC_SLI_REV4
)
7797 entry_sz
= sizeof(struct sli4_sge
);
7799 entry_sz
= sizeof(struct ulp_bde64
);
7801 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7802 if (phba
->cfg_enable_bg
) {
7804 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7805 * the FCP rsp, and a BDE for each. Sice we have no control
7806 * over how many protection data segments the SCSI Layer
7807 * will hand us (ie: there could be one for every block
7808 * in the IO), we just allocate enough BDEs to accomidate
7809 * our max amount and we need to limit lpfc_sg_seg_cnt to
7810 * minimize the risk of running out.
7812 phba
->cfg_sg_dma_buf_size
= sizeof(struct fcp_cmnd
) +
7813 sizeof(struct fcp_rsp
) +
7814 (LPFC_MAX_SG_SEG_CNT
* entry_sz
);
7816 if (phba
->cfg_sg_seg_cnt
> LPFC_MAX_SG_SEG_CNT_DIF
)
7817 phba
->cfg_sg_seg_cnt
= LPFC_MAX_SG_SEG_CNT_DIF
;
7819 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7820 phba
->cfg_total_seg_cnt
= LPFC_MAX_SG_SEG_CNT
;
7823 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7824 * the FCP rsp, a BDE for each, and a BDE for up to
7825 * cfg_sg_seg_cnt data segments.
7827 phba
->cfg_sg_dma_buf_size
= sizeof(struct fcp_cmnd
) +
7828 sizeof(struct fcp_rsp
) +
7829 ((phba
->cfg_sg_seg_cnt
+ 2) * entry_sz
);
7831 /* Total BDEs in BPL for scsi_sg_list */
7832 phba
->cfg_total_seg_cnt
= phba
->cfg_sg_seg_cnt
+ 2;
7835 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
| LOG_FCP
,
7836 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7837 phba
->cfg_sg_seg_cnt
, phba
->cfg_sg_dma_buf_size
,
7838 phba
->cfg_total_seg_cnt
);
7840 phba
->max_vpi
= LPFC_MAX_VPI
;
7841 /* This will be set to correct value after config_port mbox */
7842 phba
->max_vports
= 0;
7845 * Initialize the SLI Layer to run with lpfc HBAs.
7847 lpfc_sli_setup(phba
);
7848 lpfc_sli_queue_init(phba
);
7850 /* Allocate device driver memory */
7851 if (lpfc_mem_alloc(phba
, BPL_ALIGN_SZ
))
7854 phba
->lpfc_sg_dma_buf_pool
=
7855 dma_pool_create("lpfc_sg_dma_buf_pool",
7856 &phba
->pcidev
->dev
, phba
->cfg_sg_dma_buf_size
,
7859 if (!phba
->lpfc_sg_dma_buf_pool
)
7862 phba
->lpfc_cmd_rsp_buf_pool
=
7863 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7865 sizeof(struct fcp_cmnd
) +
7866 sizeof(struct fcp_rsp
),
7869 if (!phba
->lpfc_cmd_rsp_buf_pool
)
7870 goto fail_free_dma_buf_pool
;
7873 * Enable sr-iov virtual functions if supported and configured
7874 * through the module parameter.
7876 if (phba
->cfg_sriov_nr_virtfn
> 0) {
7877 rc
= lpfc_sli_probe_sriov_nr_virtfn(phba
,
7878 phba
->cfg_sriov_nr_virtfn
);
7880 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
7881 "2808 Requested number of SR-IOV "
7882 "virtual functions (%d) is not "
7884 phba
->cfg_sriov_nr_virtfn
);
7885 phba
->cfg_sriov_nr_virtfn
= 0;
7891 fail_free_dma_buf_pool
:
7892 dma_pool_destroy(phba
->lpfc_sg_dma_buf_pool
);
7893 phba
->lpfc_sg_dma_buf_pool
= NULL
;
7895 lpfc_mem_free(phba
);
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);
}
/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;
	int rc, i, max_buf_size;
	int longs;
	int extra;
	uint64_t wwn;
	u32 if_type;
	u32 if_fam;
7938 phba
->sli4_hba
.num_present_cpu
= lpfc_present_cpu
;
7939 phba
->sli4_hba
.num_possible_cpu
= cpumask_last(cpu_possible_mask
) + 1;
7940 phba
->sli4_hba
.curr_disp_cpu
= 0;
7942 /* Get all the module params for configuring this host */
7943 lpfc_get_cfgparam(phba
);
7945 /* Set up phase-1 common device driver resources */
7946 rc
= lpfc_setup_driver_resource_phase1(phba
);
7950 /* Before proceed, wait for POST done and device ready */
7951 rc
= lpfc_sli4_post_status_check(phba
);
7955 /* Allocate all driver workqueues here */
7957 /* The lpfc_wq workqueue for deferred irq use */
7958 phba
->wq
= alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM
, 0);
7963 * Initialize timers used by driver
7966 timer_setup(&phba
->rrq_tmr
, lpfc_rrq_timeout
, 0);
7968 /* FCF rediscover timer */
7969 timer_setup(&phba
->fcf
.redisc_wait
, lpfc_sli4_fcf_redisc_wait_tmo
, 0);
7971 /* CMF congestion timer */
7972 hrtimer_init(&phba
->cmf_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
7973 phba
->cmf_timer
.function
= lpfc_cmf_timer
;
7974 /* CMF 1 minute stats collection timer */
7975 hrtimer_init(&phba
->cmf_stats_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
7976 phba
->cmf_stats_timer
.function
= lpfc_cmf_stats_timer
;
7979 * Control structure for handling external multi-buffer mailbox
7980 * command pass-through.
7982 memset((uint8_t *)&phba
->mbox_ext_buf_ctx
, 0,
7983 sizeof(struct lpfc_mbox_ext_buf_ctx
));
7984 INIT_LIST_HEAD(&phba
->mbox_ext_buf_ctx
.ext_dmabuf_list
);
7986 phba
->max_vpi
= LPFC_MAX_VPI
;
7988 /* This will be set to correct value after the read_config mbox */
7989 phba
->max_vports
= 0;
7991 /* Program the default value of vlan_id and fc_map */
7992 phba
->valid_vlan
= 0;
7993 phba
->fc_map
[0] = LPFC_FCOE_FCF_MAP0
;
7994 phba
->fc_map
[1] = LPFC_FCOE_FCF_MAP1
;
7995 phba
->fc_map
[2] = LPFC_FCOE_FCF_MAP2
;
7998 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7999 * we will associate a new ring, for each EQ/CQ/WQ tuple.
8000 * The WQ create will allocate the ring.
8003 /* Initialize buffer queue management fields */
8004 INIT_LIST_HEAD(&phba
->hbqs
[LPFC_ELS_HBQ
].hbq_buffer_list
);
8005 phba
->hbqs
[LPFC_ELS_HBQ
].hbq_alloc_buffer
= lpfc_sli4_rb_alloc
;
8006 phba
->hbqs
[LPFC_ELS_HBQ
].hbq_free_buffer
= lpfc_sli4_rb_free
;
8008 /* for VMID idle timeout if VMID is enabled */
8009 if (lpfc_is_vmid_enabled(phba
))
8010 timer_setup(&phba
->inactive_vmid_poll
, lpfc_vmid_poll
, 0);
8013 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
8015 /* Initialize the Abort buffer list used by driver */
8016 spin_lock_init(&phba
->sli4_hba
.abts_io_buf_list_lock
);
8017 INIT_LIST_HEAD(&phba
->sli4_hba
.lpfc_abts_io_buf_list
);
8019 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
8020 /* Initialize the Abort nvme buffer list used by driver */
8021 spin_lock_init(&phba
->sli4_hba
.abts_nvmet_buf_list_lock
);
8022 INIT_LIST_HEAD(&phba
->sli4_hba
.lpfc_abts_nvmet_ctx_list
);
8023 INIT_LIST_HEAD(&phba
->sli4_hba
.lpfc_nvmet_io_wait_list
);
8024 spin_lock_init(&phba
->sli4_hba
.t_active_list_lock
);
8025 INIT_LIST_HEAD(&phba
->sli4_hba
.t_active_ctx_list
);
8028 /* This abort list used by worker thread */
8029 spin_lock_init(&phba
->sli4_hba
.sgl_list_lock
);
8030 spin_lock_init(&phba
->sli4_hba
.nvmet_io_wait_lock
);
8031 spin_lock_init(&phba
->sli4_hba
.asynce_list_lock
);
8032 spin_lock_init(&phba
->sli4_hba
.els_xri_abrt_list_lock
);
	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
8039 INIT_LIST_HEAD(&phba
->sli4_hba
.sp_cqe_event_pool
);
8040 /* Response IOCB work queue list */
8041 INIT_LIST_HEAD(&phba
->sli4_hba
.sp_queue_event
);
8042 /* Asynchronous event CQ Event work queue list */
8043 INIT_LIST_HEAD(&phba
->sli4_hba
.sp_asynce_work_queue
);
8044 /* Slow-path XRI aborted CQ Event work queue list */
8045 INIT_LIST_HEAD(&phba
->sli4_hba
.sp_els_xri_aborted_work_queue
);
8046 /* Receive queue CQ Event work queue list */
8047 INIT_LIST_HEAD(&phba
->sli4_hba
.sp_unsol_work_queue
);
8049 /* Initialize extent block lists. */
8050 INIT_LIST_HEAD(&phba
->sli4_hba
.lpfc_rpi_blk_list
);
8051 INIT_LIST_HEAD(&phba
->sli4_hba
.lpfc_xri_blk_list
);
8052 INIT_LIST_HEAD(&phba
->sli4_hba
.lpfc_vfi_blk_list
);
8053 INIT_LIST_HEAD(&phba
->lpfc_vpi_blk_list
);
8055 /* Initialize mboxq lists. If the early init routines fail
8056 * these lists need to be correctly initialized.
8058 INIT_LIST_HEAD(&phba
->sli
.mboxq
);
8059 INIT_LIST_HEAD(&phba
->sli
.mboxq_cmpl
);
8061 /* initialize optic_state to 0xFF */
8062 phba
->sli4_hba
.lnk_info
.optic_state
= 0xff;
8064 /* Allocate device driver memory */
8065 rc
= lpfc_mem_alloc(phba
, SGL_ALIGN_SZ
);
8067 goto out_destroy_workqueue
;
8069 /* IF Type 2 ports get initialized now. */
8070 if (bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
) >=
8071 LPFC_SLI_INTF_IF_TYPE_2
) {
8072 rc
= lpfc_pci_function_reset(phba
);
8077 phba
->temp_sensor_support
= 1;
8080 /* Create the bootstrap mailbox command */
8081 rc
= lpfc_create_bootstrap_mbox(phba
);
8085 /* Set up the host's endian order with the device. */
8086 rc
= lpfc_setup_endian_order(phba
);
8088 goto out_free_bsmbx
;
8090 /* Set up the hba's configuration parameters. */
8091 rc
= lpfc_sli4_read_config(phba
);
8093 goto out_free_bsmbx
;
8095 if (phba
->sli4_hba
.fawwpn_flag
& LPFC_FAWWPN_CONFIG
) {
8096 /* Right now the link is down, if FA-PWWN is configured the
8097 * firmware will try FLOGI before the driver gets a link up.
8098 * If it fails, the driver should get a MISCONFIGURED async
8099 * event which will clear this flag. The only notification
8100 * the driver gets is if it fails, if it succeeds there is no
8101 * notification given. Assume success.
8103 phba
->sli4_hba
.fawwpn_flag
|= LPFC_FAWWPN_FABRIC
;
8106 rc
= lpfc_mem_alloc_active_rrq_pool_s4(phba
);
8108 goto out_free_bsmbx
;
8110 /* IF Type 0 ports get initialized now. */
8111 if (bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
) ==
8112 LPFC_SLI_INTF_IF_TYPE_0
) {
8113 rc
= lpfc_pci_function_reset(phba
);
8115 goto out_free_bsmbx
;
8118 mboxq
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
,
8122 goto out_free_bsmbx
;
8125 /* Check for NVMET being configured */
8126 phba
->nvmet_support
= 0;
8127 if (lpfc_enable_nvmet_cnt
) {
8129 /* First get WWN of HBA instance */
8130 lpfc_read_nv(phba
, mboxq
);
8131 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
8132 if (rc
!= MBX_SUCCESS
) {
8133 lpfc_printf_log(phba
, KERN_ERR
,
8135 "6016 Mailbox failed , mbxCmd x%x "
8136 "READ_NV, mbxStatus x%x\n",
8137 bf_get(lpfc_mqe_command
, &mboxq
->u
.mqe
),
8138 bf_get(lpfc_mqe_status
, &mboxq
->u
.mqe
));
8139 mempool_free(mboxq
, phba
->mbox_mem_pool
);
8141 goto out_free_bsmbx
;
8144 memcpy(&wwn
, (char *)mb
->un
.varRDnvp
.nodename
,
8146 wwn
= cpu_to_be64(wwn
);
8147 phba
->sli4_hba
.wwnn
.u
.name
= wwn
;
8148 memcpy(&wwn
, (char *)mb
->un
.varRDnvp
.portname
,
8150 /* wwn is WWPN of HBA instance */
8151 wwn
= cpu_to_be64(wwn
);
8152 phba
->sli4_hba
.wwpn
.u
.name
= wwn
;
8154 /* Check to see if it matches any module parameter */
8155 for (i
= 0; i
< lpfc_enable_nvmet_cnt
; i
++) {
8156 if (wwn
== lpfc_enable_nvmet
[i
]) {
8157 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8158 if (lpfc_nvmet_mem_alloc(phba
))
8161 phba
->nvmet_support
= 1; /* a match */
8163 lpfc_printf_log(phba
, KERN_ERR
,
8165 "6017 NVME Target %016llx\n",
8168 lpfc_printf_log(phba
, KERN_ERR
,
8170 "6021 Can't enable NVME Target."
8171 " NVME_TARGET_FC infrastructure"
8172 " is not in kernel\n");
8174 /* Not supported for NVMET */
8175 phba
->cfg_xri_rebalancing
= 0;
8176 if (phba
->irq_chann_mode
== NHT_MODE
) {
8177 phba
->cfg_irq_chann
=
8178 phba
->sli4_hba
.num_present_cpu
;
8179 phba
->cfg_hdw_queue
=
8180 phba
->sli4_hba
.num_present_cpu
;
8181 phba
->irq_chann_mode
= NORMAL_MODE
;
8188 lpfc_nvme_mod_param_dep(phba
);
8191 * Get sli4 parameters that override parameters from Port capabilities.
8192 * If this call fails, it isn't critical unless the SLI4 parameters come
8195 rc
= lpfc_get_sli4_parameters(phba
, mboxq
);
8197 if_type
= bf_get(lpfc_sli_intf_if_type
,
8198 &phba
->sli4_hba
.sli_intf
);
8199 if_fam
= bf_get(lpfc_sli_intf_sli_family
,
8200 &phba
->sli4_hba
.sli_intf
);
8201 if (phba
->sli4_hba
.extents_in_use
&&
8202 phba
->sli4_hba
.rpi_hdrs_in_use
) {
8203 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8204 "2999 Unsupported SLI4 Parameters "
8205 "Extents and RPI headers enabled.\n");
8206 if (if_type
== LPFC_SLI_INTF_IF_TYPE_0
&&
8207 if_fam
== LPFC_SLI_INTF_FAMILY_BE2
) {
8208 mempool_free(mboxq
, phba
->mbox_mem_pool
);
8210 goto out_free_bsmbx
;
8213 if (!(if_type
== LPFC_SLI_INTF_IF_TYPE_0
&&
8214 if_fam
== LPFC_SLI_INTF_FAMILY_BE2
)) {
8215 mempool_free(mboxq
, phba
->mbox_mem_pool
);
8217 goto out_free_bsmbx
;
8222 * 1 for cmd, 1 for rsp, NVME adds an extra one
8223 * for boundary conditions in its max_sgl_segment template.
8226 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
)
8230 * It doesn't matter what family our adapter is in, we are
8231 * limited to 2 Pages, 512 SGEs, for our SGL.
8232 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8234 max_buf_size
= (2 * SLI4_PAGE_SIZE
);
8237 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8238 * used to create the sg_dma_buf_pool must be calculated.
8240 if (phba
->sli3_options
& LPFC_SLI3_BG_ENABLED
) {
8241 /* Both cfg_enable_bg and cfg_external_dif code paths */
		/*
		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE. Since we have no control
		 * over how many protection segments the SCSI Layer
		 * will hand us (i.e. there could be one for every block
		 * in the IO), just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt
		 * to minimize the risk of running out.
		 */
8252 phba
->cfg_sg_dma_buf_size
= sizeof(struct fcp_cmnd32
) +
8253 sizeof(struct fcp_rsp
) + max_buf_size
;
8255 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8256 phba
->cfg_total_seg_cnt
= LPFC_MAX_SGL_SEG_CNT
;
8259 * If supporting DIF, reduce the seg count for scsi to
8260 * allow room for the DIF sges.
8262 if (phba
->cfg_enable_bg
&&
8263 phba
->cfg_sg_seg_cnt
> LPFC_MAX_BG_SLI4_SEG_CNT_DIF
)
8264 phba
->cfg_scsi_seg_cnt
= LPFC_MAX_BG_SLI4_SEG_CNT_DIF
;
8266 phba
->cfg_scsi_seg_cnt
= phba
->cfg_sg_seg_cnt
;
8270 * The scsi_buf for a regular I/O holds the FCP cmnd,
8271 * the FCP rsp, a SGE for each, and a SGE for up to
8272 * cfg_sg_seg_cnt data segments.
8274 phba
->cfg_sg_dma_buf_size
= sizeof(struct fcp_cmnd32
) +
8275 sizeof(struct fcp_rsp
) +
8276 ((phba
->cfg_sg_seg_cnt
+ extra
) *
8277 sizeof(struct sli4_sge
));
8279 /* Total SGEs for scsi_sg_list */
8280 phba
->cfg_total_seg_cnt
= phba
->cfg_sg_seg_cnt
+ extra
;
8281 phba
->cfg_scsi_seg_cnt
= phba
->cfg_sg_seg_cnt
;
8284 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8285 * need to post 1 page for the SGL.
8289 if (phba
->cfg_xpsgl
&& !phba
->nvmet_support
)
8290 phba
->cfg_sg_dma_buf_size
= LPFC_DEFAULT_XPSGL_SIZE
;
8291 else if (phba
->cfg_sg_dma_buf_size
<= LPFC_MIN_SG_SLI4_BUF_SZ
)
8292 phba
->cfg_sg_dma_buf_size
= LPFC_MIN_SG_SLI4_BUF_SZ
;
8294 phba
->cfg_sg_dma_buf_size
=
8295 SLI4_PAGE_ALIGN(phba
->cfg_sg_dma_buf_size
);
8297 phba
->border_sge_num
= phba
->cfg_sg_dma_buf_size
/
8298 sizeof(struct sli4_sge
);
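	/*
	 * Note added for clarity (not in the original source):
	 * border_sge_num is the count of 16-byte SGEs that fit in one
	 * sg_dma_buf page; it is used when building chained SGLs to decide
	 * where the link SGE to the next buffer page must be placed.
	 */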
8300 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8301 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
8302 if (phba
->cfg_sg_seg_cnt
> LPFC_MAX_NVME_SEG_CNT
) {
8303 lpfc_printf_log(phba
, KERN_INFO
, LOG_NVME
| LOG_INIT
,
8304 "6300 Reducing NVME sg segment "
8306 LPFC_MAX_NVME_SEG_CNT
);
8307 phba
->cfg_nvme_seg_cnt
= LPFC_MAX_NVME_SEG_CNT
;
8309 phba
->cfg_nvme_seg_cnt
= phba
->cfg_sg_seg_cnt
;
8312 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
| LOG_FCP
,
8313 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8314 "total:%d scsi:%d nvme:%d\n",
8315 phba
->cfg_sg_seg_cnt
, phba
->cfg_sg_dma_buf_size
,
8316 phba
->cfg_total_seg_cnt
, phba
->cfg_scsi_seg_cnt
,
8317 phba
->cfg_nvme_seg_cnt
);
8319 if (phba
->cfg_sg_dma_buf_size
< SLI4_PAGE_SIZE
)
8320 i
= phba
->cfg_sg_dma_buf_size
;
8324 phba
->lpfc_sg_dma_buf_pool
=
8325 dma_pool_create("lpfc_sg_dma_buf_pool",
8327 phba
->cfg_sg_dma_buf_size
,
8329 if (!phba
->lpfc_sg_dma_buf_pool
) {
8331 goto out_free_bsmbx
;
8334 phba
->lpfc_cmd_rsp_buf_pool
=
8335 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8337 sizeof(struct fcp_cmnd32
) +
8338 sizeof(struct fcp_rsp
),
8340 if (!phba
->lpfc_cmd_rsp_buf_pool
) {
8342 goto out_free_sg_dma_buf
;
8345 mempool_free(mboxq
, phba
->mbox_mem_pool
);
8347 /* Verify OAS is supported */
8348 lpfc_sli4_oas_verify(phba
);
8350 /* Verify RAS support on adapter */
8351 lpfc_sli4_ras_init(phba
);
8353 /* Verify all the SLI4 queues */
8354 rc
= lpfc_sli4_queue_verify(phba
);
8356 goto out_free_cmd_rsp_buf
;
8358 /* Create driver internal CQE event pool */
8359 rc
= lpfc_sli4_cq_event_pool_create(phba
);
8361 goto out_free_cmd_rsp_buf
;
8363 /* Initialize sgl lists per host */
8364 lpfc_init_sgl_list(phba
);
8366 /* Allocate and initialize active sgl array */
8367 rc
= lpfc_init_active_sgl_array(phba
);
8369 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8370 "1430 Failed to initialize sgl list.\n");
8371 goto out_destroy_cq_event_pool
;
8373 rc
= lpfc_sli4_init_rpi_hdrs(phba
);
8375 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8376 "1432 Failed to initialize rpi headers.\n");
8377 goto out_free_active_sgl
;
8380 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8381 longs
= (LPFC_SLI4_FCF_TBL_INDX_MAX
+ BITS_PER_LONG
- 1)/BITS_PER_LONG
;
8382 phba
->fcf
.fcf_rr_bmask
= kcalloc(longs
, sizeof(unsigned long),
8384 if (!phba
->fcf
.fcf_rr_bmask
) {
8385 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8386 "2759 Failed allocate memory for FCF round "
8387 "robin failover bmask\n");
8389 goto out_remove_rpi_hdrs
;
8392 phba
->sli4_hba
.hba_eq_hdl
= kcalloc(phba
->cfg_irq_chann
,
8393 sizeof(struct lpfc_hba_eq_hdl
),
8395 if (!phba
->sli4_hba
.hba_eq_hdl
) {
8396 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8397 "2572 Failed allocate memory for "
8398 "fast-path per-EQ handle array\n");
8400 goto out_free_fcf_rr_bmask
;
8403 phba
->sli4_hba
.cpu_map
= kcalloc(phba
->sli4_hba
.num_possible_cpu
,
8404 sizeof(struct lpfc_vector_map_info
),
8406 if (!phba
->sli4_hba
.cpu_map
) {
8407 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8408 "3327 Failed allocate memory for msi-x "
8409 "interrupt vector mapping\n");
8411 goto out_free_hba_eq_hdl
;
8414 phba
->sli4_hba
.eq_info
= alloc_percpu(struct lpfc_eq_intr_info
);
8415 if (!phba
->sli4_hba
.eq_info
) {
8416 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8417 "3321 Failed allocation for per_cpu stats\n");
8419 goto out_free_hba_cpu_map
;
8422 phba
->sli4_hba
.idle_stat
= kcalloc(phba
->sli4_hba
.num_possible_cpu
,
8423 sizeof(*phba
->sli4_hba
.idle_stat
),
8425 if (!phba
->sli4_hba
.idle_stat
) {
8426 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8427 "3390 Failed allocation for idle_stat\n");
8429 goto out_free_hba_eq_info
;
8432 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8433 phba
->sli4_hba
.c_stat
= alloc_percpu(struct lpfc_hdwq_stat
);
8434 if (!phba
->sli4_hba
.c_stat
) {
8435 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8436 "3332 Failed allocating per cpu hdwq stats\n");
8438 goto out_free_hba_idle_stat
;
8442 phba
->cmf_stat
= alloc_percpu(struct lpfc_cgn_stat
);
8443 if (!phba
->cmf_stat
) {
8444 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8445 "3331 Failed allocating per cpu cgn stats\n");
8447 goto out_free_hba_hdwq_info
;
8451 * Enable sr-iov virtual functions if supported and configured
8452 * through the module parameter.
8454 if (phba
->cfg_sriov_nr_virtfn
> 0) {
8455 rc
= lpfc_sli_probe_sriov_nr_virtfn(phba
,
8456 phba
->cfg_sriov_nr_virtfn
);
8458 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
8459 "3020 Requested number of SR-IOV "
8460 "virtual functions (%d) is not "
8462 phba
->cfg_sriov_nr_virtfn
);
8463 phba
->cfg_sriov_nr_virtfn
= 0;
8469 out_free_hba_hdwq_info
:
8470 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8471 free_percpu(phba
->sli4_hba
.c_stat
);
8472 out_free_hba_idle_stat
:
8474 kfree(phba
->sli4_hba
.idle_stat
);
8475 out_free_hba_eq_info
:
8476 free_percpu(phba
->sli4_hba
.eq_info
);
8477 out_free_hba_cpu_map
:
8478 kfree(phba
->sli4_hba
.cpu_map
);
8479 out_free_hba_eq_hdl
:
8480 kfree(phba
->sli4_hba
.hba_eq_hdl
);
8481 out_free_fcf_rr_bmask
:
8482 kfree(phba
->fcf
.fcf_rr_bmask
);
8483 out_remove_rpi_hdrs
:
8484 lpfc_sli4_remove_rpi_hdrs(phba
);
8485 out_free_active_sgl
:
8486 lpfc_free_active_sgl(phba
);
8487 out_destroy_cq_event_pool
:
8488 lpfc_sli4_cq_event_pool_destroy(phba
);
8489 out_free_cmd_rsp_buf
:
8490 dma_pool_destroy(phba
->lpfc_cmd_rsp_buf_pool
);
8491 phba
->lpfc_cmd_rsp_buf_pool
= NULL
;
8492 out_free_sg_dma_buf
:
8493 dma_pool_destroy(phba
->lpfc_sg_dma_buf_pool
);
8494 phba
->lpfc_sg_dma_buf_pool
= NULL
;
8496 lpfc_destroy_bootstrap_mbox(phba
);
8498 lpfc_mem_free(phba
);
8499 out_destroy_workqueue
:
8500 destroy_workqueue(phba
->wq
);
8506 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8507 * @phba: pointer to lpfc hba data structure.
8509 * This routine is invoked to unset the driver internal resources set up
8510 * specific for supporting the SLI-4 HBA device it attached to.
8513 lpfc_sli4_driver_resource_unset(struct lpfc_hba
*phba
)
8515 struct lpfc_fcf_conn_entry
*conn_entry
, *next_conn_entry
;
8517 free_percpu(phba
->sli4_hba
.eq_info
);
8518 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8519 free_percpu(phba
->sli4_hba
.c_stat
);
8521 free_percpu(phba
->cmf_stat
);
8522 kfree(phba
->sli4_hba
.idle_stat
);
8524 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8525 kfree(phba
->sli4_hba
.cpu_map
);
8526 phba
->sli4_hba
.num_possible_cpu
= 0;
8527 phba
->sli4_hba
.num_present_cpu
= 0;
8528 phba
->sli4_hba
.curr_disp_cpu
= 0;
8529 cpumask_clear(&phba
->sli4_hba
.irq_aff_mask
);
8531 /* Free memory allocated for fast-path work queue handles */
8532 kfree(phba
->sli4_hba
.hba_eq_hdl
);
8534 /* Free the allocated rpi headers. */
8535 lpfc_sli4_remove_rpi_hdrs(phba
);
8536 lpfc_sli4_remove_rpis(phba
);
8538 /* Free eligible FCF index bmask */
8539 kfree(phba
->fcf
.fcf_rr_bmask
);
8541 /* Free the ELS sgl list */
8542 lpfc_free_active_sgl(phba
);
8543 lpfc_free_els_sgl_list(phba
);
8544 lpfc_free_nvmet_sgl_list(phba
);
8546 /* Free the completion queue EQ event pool */
8547 lpfc_sli4_cq_event_release_all(phba
);
8548 lpfc_sli4_cq_event_pool_destroy(phba
);
8550 /* Release resource identifiers. */
8551 lpfc_sli4_dealloc_resource_identifiers(phba
);
8553 /* Free the bsmbx region. */
8554 lpfc_destroy_bootstrap_mbox(phba
);
8556 /* Free the SLI Layer memory with SLI4 HBAs */
8557 lpfc_mem_free_all(phba
);
8559 /* Free the current connect table */
8560 list_for_each_entry_safe(conn_entry
, next_conn_entry
,
8561 &phba
->fcf_conn_rec_list
, list
) {
8562 list_del_init(&conn_entry
->list
);
/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	if (phba->worker_thread)
		kthread_stop(phba->worker_thread);
}
/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbs
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
			       "expected %d count. Unloading driver.\n",
			       __func__, i, iocb_count);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
			       "Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
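/*
 * Note added for clarity (not in the original source): the iocbq entries
 * placed on phba->lpfc_iocb_list above form the free pool that the SLI layer
 * later draws from and returns to as commands are issued and completed.
 */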
/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
static void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}

/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}
/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}
/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}

/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}

/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.io_xri_cnt = 0;
}
8862 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8863 * @phba: pointer to lpfc hba data structure.
8865 * This routine is invoked to post rpi header templates to the
8866 * port for those SLI4 ports that do not support extents. This routine
8867 * posts a PAGE_SIZE memory region to the port to hold up to
8868 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
8869 * and should be called only when interrupts are disabled.
8873 * -ERROR - otherwise.
8876 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba
*phba
)
8879 struct lpfc_rpi_hdr
*rpi_hdr
;
8881 INIT_LIST_HEAD(&phba
->sli4_hba
.lpfc_rpi_hdr_list
);
8882 if (!phba
->sli4_hba
.rpi_hdrs_in_use
)
8884 if (phba
->sli4_hba
.extents_in_use
)
8887 rpi_hdr
= lpfc_sli4_create_rpi_hdr(phba
);
8889 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8890 "0391 Error during rpi post operation\n");
8891 lpfc_sli4_remove_rpis(phba
);
8899 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8900 * @phba: pointer to lpfc hba data structure.
8902 * This routine is invoked to allocate a single 4KB memory region to
8903 * support rpis and stores them in the phba. This single region
8904 * provides support for up to 64 rpis. The region is used globally
8908 * A valid rpi hdr on success.
8909 * A NULL pointer on any failure.
8911 struct lpfc_rpi_hdr
*
8912 lpfc_sli4_create_rpi_hdr(struct lpfc_hba
*phba
)
8914 uint16_t rpi_limit
, curr_rpi_range
;
8915 struct lpfc_dmabuf
*dmabuf
;
8916 struct lpfc_rpi_hdr
*rpi_hdr
;
8919 * If the SLI4 port supports extents, posting the rpi header isn't
8920 * required. Set the expected maximum count and let the actual value
8921 * get set when extents are fully allocated.
8923 if (!phba
->sli4_hba
.rpi_hdrs_in_use
)
8925 if (phba
->sli4_hba
.extents_in_use
)
8928 /* The limit on the logical index is just the max_rpi count. */
8929 rpi_limit
= phba
->sli4_hba
.max_cfg_param
.max_rpi
;
8931 spin_lock_irq(&phba
->hbalock
);
8933 * Establish the starting RPI in this header block. The starting
8934 * rpi is normalized to a zero base because the physical rpi is
8937 curr_rpi_range
= phba
->sli4_hba
.next_rpi
;
8938 spin_unlock_irq(&phba
->hbalock
);
8940 /* Reached full RPI range */
8941 if (curr_rpi_range
== rpi_limit
)
8945 * First allocate the protocol header region for the port. The
8946 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8948 dmabuf
= kzalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
8952 dmabuf
->virt
= dma_alloc_coherent(&phba
->pcidev
->dev
,
8953 LPFC_HDR_TEMPLATE_SIZE
,
8954 &dmabuf
->phys
, GFP_KERNEL
);
8955 if (!dmabuf
->virt
) {
8957 goto err_free_dmabuf
;
8960 if (!IS_ALIGNED(dmabuf
->phys
, LPFC_HDR_TEMPLATE_SIZE
)) {
8962 goto err_free_coherent
;
8965 /* Save the rpi header data for cleanup later. */
8966 rpi_hdr
= kzalloc(sizeof(struct lpfc_rpi_hdr
), GFP_KERNEL
);
8968 goto err_free_coherent
;
8970 rpi_hdr
->dmabuf
= dmabuf
;
8971 rpi_hdr
->len
= LPFC_HDR_TEMPLATE_SIZE
;
8972 rpi_hdr
->page_count
= 1;
8973 spin_lock_irq(&phba
->hbalock
);
8975 /* The rpi_hdr stores the logical index only. */
8976 rpi_hdr
->start_rpi
= curr_rpi_range
;
8977 rpi_hdr
->next_rpi
= phba
->sli4_hba
.next_rpi
+ LPFC_RPI_HDR_COUNT
;
8978 list_add_tail(&rpi_hdr
->list
, &phba
->sli4_hba
.lpfc_rpi_hdr_list
);
8980 spin_unlock_irq(&phba
->hbalock
);
8984 dma_free_coherent(&phba
->pcidev
->dev
, LPFC_HDR_TEMPLATE_SIZE
,
8985 dmabuf
->virt
, dmabuf
->phys
);
8992 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8993 * @phba: pointer to lpfc hba data structure.
8995 * This routine is invoked to remove all memory resources allocated
8996 * to support rpis for SLI4 ports not supporting extents. This routine
8997 * presumes the caller has released all rpis consumed by fabric or port
8998 * logins and is prepared to have the header pages removed.
9001 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba
*phba
)
9003 struct lpfc_rpi_hdr
*rpi_hdr
, *next_rpi_hdr
;
9005 if (!phba
->sli4_hba
.rpi_hdrs_in_use
)
9008 list_for_each_entry_safe(rpi_hdr
, next_rpi_hdr
,
9009 &phba
->sli4_hba
.lpfc_rpi_hdr_list
, list
) {
9010 list_del(&rpi_hdr
->list
);
9011 dma_free_coherent(&phba
->pcidev
->dev
, rpi_hdr
->len
,
9012 rpi_hdr
->dmabuf
->virt
, rpi_hdr
->dmabuf
->phys
);
9013 kfree(rpi_hdr
->dmabuf
);
9017 /* There are no rpis available to the port now. */
9018 phba
->sli4_hba
.next_rpi
= 0;
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}

/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		kfree(phba->sli4_hba.hdwq);

	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
}
/**
 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine will set up the initial FDMI attribute masks for
 * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
 * to get these attributes first before falling back, the attribute
 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1
 **/
static void
lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	set_bit(FC_ALLOW_FDMI, &vport->load_flag);
	if (phba->cfg_enable_SmartSAN ||
	    phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"6077 Setup FDMI mask: hba x%x port x%x\n",
			vport->fdmi_hba_mask, vport->fdmi_port_mask);
}
9117 * lpfc_create_shost - Create hba physical port with associated scsi host.
9118 * @phba: pointer to lpfc hba data structure.
9120 * This routine is invoked to create HBA physical port and associate a SCSI
9125 * other values - error
9128 lpfc_create_shost(struct lpfc_hba
*phba
)
9130 struct lpfc_vport
*vport
;
9131 struct Scsi_Host
*shost
;
9133 /* Initialize HBA FC structure */
9134 phba
->fc_edtov
= FF_DEF_EDTOV
;
9135 phba
->fc_ratov
= FF_DEF_RATOV
;
9136 phba
->fc_altov
= FF_DEF_ALTOV
;
9137 phba
->fc_arbtov
= FF_DEF_ARBTOV
;
9139 atomic_set(&phba
->sdev_cnt
, 0);
9140 vport
= lpfc_create_port(phba
, phba
->brd_no
, &phba
->pcidev
->dev
);
9144 shost
= lpfc_shost_from_vport(vport
);
9145 phba
->pport
= vport
;
9147 if (phba
->nvmet_support
) {
9148 /* Only 1 vport (pport) will support NVME target */
9149 phba
->targetport
= NULL
;
9150 phba
->cfg_enable_fc4_type
= LPFC_ENABLE_NVME
;
9151 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
| LOG_NVME_DISC
,
9152 "6076 NVME Target Found\n");
9155 lpfc_debugfs_initialize(vport
);
9156 /* Put reference to SCSI host to driver's device private data */
9157 pci_set_drvdata(phba
->pcidev
, shost
);
9159 lpfc_setup_fdmi_mask(vport
);
9162 * At this point we are fully registered with PSA. In addition,
9163 * any initial discovery should be completed.
9169 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9170 * @phba: pointer to lpfc hba data structure.
9172 * This routine is invoked to destroy HBA physical port and the associated
9176 lpfc_destroy_shost(struct lpfc_hba
*phba
)
9178 struct lpfc_vport
*vport
= phba
->pport
;
9180 /* Destroy physical port that associated with the SCSI host */
9181 destroy_port(vport
);
9187 * lpfc_setup_bg - Setup Block guard structures and debug areas.
9188 * @phba: pointer to lpfc hba data structure.
9189 * @shost: the shost to be used to detect Block guard settings.
9191 * This routine sets up the local Block guard protocol settings for @shost.
9192 * This routine also allocates memory for debugging bg buffers.
9195 lpfc_setup_bg(struct lpfc_hba
*phba
, struct Scsi_Host
*shost
)
9200 if (phba
->cfg_prot_mask
&& phba
->cfg_prot_guard
) {
9201 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9202 "1478 Registering BlockGuard with the "
9205 old_mask
= phba
->cfg_prot_mask
;
9206 old_guard
= phba
->cfg_prot_guard
;
9208 /* Only allow supported values */
9209 phba
->cfg_prot_mask
&= (SHOST_DIF_TYPE1_PROTECTION
|
9210 SHOST_DIX_TYPE0_PROTECTION
|
9211 SHOST_DIX_TYPE1_PROTECTION
);
9212 phba
->cfg_prot_guard
&= (SHOST_DIX_GUARD_IP
|
9213 SHOST_DIX_GUARD_CRC
);
9215 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9216 if (phba
->cfg_prot_mask
== SHOST_DIX_TYPE1_PROTECTION
)
9217 phba
->cfg_prot_mask
|= SHOST_DIF_TYPE1_PROTECTION
;
9219 if (phba
->cfg_prot_mask
&& phba
->cfg_prot_guard
) {
9220 if ((old_mask
!= phba
->cfg_prot_mask
) ||
9221 (old_guard
!= phba
->cfg_prot_guard
))
9222 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9223 "1475 Registering BlockGuard with the "
9224 "SCSI layer: mask %d guard %d\n",
9225 phba
->cfg_prot_mask
,
9226 phba
->cfg_prot_guard
);
9228 scsi_host_set_prot(shost
, phba
->cfg_prot_mask
);
9229 scsi_host_set_guard(shost
, phba
->cfg_prot_guard
);
9231 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9232 "1479 Not Registering BlockGuard with the SCSI "
9233 "layer, Bad protection parameters: %d %d\n",
9234 old_mask
, old_guard
);
9239 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9240 * @phba: pointer to lpfc hba data structure.
9242 * This routine is invoked to perform all the necessary post initialization
9243 * setup for the device.
9246 lpfc_post_init_setup(struct lpfc_hba
*phba
)
9248 struct Scsi_Host
*shost
;
9249 struct lpfc_adapter_event_header adapter_event
;
9251 /* Get the default values for Model Name and Description */
9252 lpfc_get_hba_model_desc(phba
, phba
->ModelName
, phba
->ModelDesc
);
9255 * hba setup may have changed the hba_queue_depth so we need to
9256 * adjust the value of can_queue.
9258 shost
= pci_get_drvdata(phba
->pcidev
);
9259 shost
->can_queue
= phba
->cfg_hba_queue_depth
- 10;
9261 lpfc_host_attrib_init(shost
);
9263 if (phba
->cfg_poll
& DISABLE_FCP_RING_INT
) {
9264 spin_lock_irq(shost
->host_lock
);
9265 lpfc_poll_start_timer(phba
);
9266 spin_unlock_irq(shost
->host_lock
);
9269 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9270 "0428 Perform SCSI scan\n");
9271 /* Send board arrival event to upper layer */
9272 adapter_event
.event_type
= FC_REG_ADAPTER_EVENT
;
9273 adapter_event
.subcategory
= LPFC_EVENT_ARRIVAL
;
9274 fc_host_post_vendor_event(shost
, fc_get_event_number(),
9275 sizeof(adapter_event
),
9276 (char *) &adapter_event
,
9282 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9283 * @phba: pointer to lpfc hba data structure.
9285 * This routine is invoked to set up the PCI device memory space for device
9286 * with SLI-3 interface spec.
9290 * other values - error
9293 lpfc_sli_pci_mem_setup(struct lpfc_hba
*phba
)
9295 struct pci_dev
*pdev
= phba
->pcidev
;
9296 unsigned long bar0map_len
, bar2map_len
;
9304 /* Set the device DMA mask size */
9305 error
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
9307 error
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
9312 /* Get the bus address of Bar0 and Bar2 and the number of bytes
9313 * required by each mapping.
9315 phba
->pci_bar0_map
= pci_resource_start(pdev
, 0);
9316 bar0map_len
= pci_resource_len(pdev
, 0);
9318 phba
->pci_bar2_map
= pci_resource_start(pdev
, 2);
9319 bar2map_len
= pci_resource_len(pdev
, 2);
9321 /* Map HBA SLIM to a kernel virtual address. */
9322 phba
->slim_memmap_p
= ioremap(phba
->pci_bar0_map
, bar0map_len
);
9323 if (!phba
->slim_memmap_p
) {
9324 dev_printk(KERN_ERR
, &pdev
->dev
,
9325 "ioremap failed for SLIM memory.\n");
9329 /* Map HBA Control Registers to a kernel virtual address. */
9330 phba
->ctrl_regs_memmap_p
= ioremap(phba
->pci_bar2_map
, bar2map_len
);
9331 if (!phba
->ctrl_regs_memmap_p
) {
9332 dev_printk(KERN_ERR
, &pdev
->dev
,
9333 "ioremap failed for HBA control registers.\n");
9334 goto out_iounmap_slim
;
9337 /* Allocate memory for SLI-2 structures */
9338 phba
->slim2p
.virt
= dma_alloc_coherent(&pdev
->dev
, SLI2_SLIM_SIZE
,
9339 &phba
->slim2p
.phys
, GFP_KERNEL
);
9340 if (!phba
->slim2p
.virt
)
9343 phba
->mbox
= phba
->slim2p
.virt
+ offsetof(struct lpfc_sli2_slim
, mbx
);
9344 phba
->mbox_ext
= (phba
->slim2p
.virt
+
9345 offsetof(struct lpfc_sli2_slim
, mbx_ext_words
));
9346 phba
->pcb
= (phba
->slim2p
.virt
+ offsetof(struct lpfc_sli2_slim
, pcb
));
9347 phba
->IOCBs
= (phba
->slim2p
.virt
+
9348 offsetof(struct lpfc_sli2_slim
, IOCBs
));
9350 phba
->hbqslimp
.virt
= dma_alloc_coherent(&pdev
->dev
,
9351 lpfc_sli_hbq_size(),
9352 &phba
->hbqslimp
.phys
,
9354 if (!phba
->hbqslimp
.virt
)
9357 hbq_count
= lpfc_sli_hbq_count();
9358 ptr
= phba
->hbqslimp
.virt
;
9359 for (i
= 0; i
< hbq_count
; ++i
) {
9360 phba
->hbqs
[i
].hbq_virt
= ptr
;
9361 INIT_LIST_HEAD(&phba
->hbqs
[i
].hbq_buffer_list
);
9362 ptr
+= (lpfc_hbq_defs
[i
]->entry_count
*
9363 sizeof(struct lpfc_hbq_entry
));
9365 phba
->hbqs
[LPFC_ELS_HBQ
].hbq_alloc_buffer
= lpfc_els_hbq_alloc
;
9366 phba
->hbqs
[LPFC_ELS_HBQ
].hbq_free_buffer
= lpfc_els_hbq_free
;
9368 memset(phba
->hbqslimp
.virt
, 0, lpfc_sli_hbq_size());
9370 phba
->MBslimaddr
= phba
->slim_memmap_p
;
9371 phba
->HAregaddr
= phba
->ctrl_regs_memmap_p
+ HA_REG_OFFSET
;
9372 phba
->CAregaddr
= phba
->ctrl_regs_memmap_p
+ CA_REG_OFFSET
;
9373 phba
->HSregaddr
= phba
->ctrl_regs_memmap_p
+ HS_REG_OFFSET
;
9374 phba
->HCregaddr
= phba
->ctrl_regs_memmap_p
+ HC_REG_OFFSET
;
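	/*
	 * Note added for clarity (not in the original source): the SLI-3
	 * mailbox sits at the start of the BAR0 SLIM window mapped above,
	 * while the Host Attention, Chip Attention, Host Status and Host
	 * Control registers live at fixed offsets within the BAR2 control
	 * register window.
	 */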
9379 dma_free_coherent(&pdev
->dev
, SLI2_SLIM_SIZE
,
9380 phba
->slim2p
.virt
, phba
->slim2p
.phys
);
9382 iounmap(phba
->ctrl_regs_memmap_p
);
9384 iounmap(phba
->slim_memmap_p
);
9390 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9391 * @phba: pointer to lpfc hba data structure.
9393 * This routine is invoked to unset the PCI device memory space for device
9394 * with SLI-3 interface spec.
9397 lpfc_sli_pci_mem_unset(struct lpfc_hba
*phba
)
9399 struct pci_dev
*pdev
;
9401 /* Obtain PCI device reference */
9405 pdev
= phba
->pcidev
;
9407 /* Free coherent DMA memory allocated */
9408 dma_free_coherent(&pdev
->dev
, lpfc_sli_hbq_size(),
9409 phba
->hbqslimp
.virt
, phba
->hbqslimp
.phys
);
9410 dma_free_coherent(&pdev
->dev
, SLI2_SLIM_SIZE
,
9411 phba
->slim2p
.virt
, phba
->slim2p
.phys
);
9413 /* I/O memory unmap */
9414 iounmap(phba
->ctrl_regs_memmap_p
);
9415 iounmap(phba
->slim_memmap_p
);
9421 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9422 * @phba: pointer to lpfc hba data structure.
9424 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9425 * done and check status.
9427 * Return 0 if successful, otherwise -ENODEV.
9430 lpfc_sli4_post_status_check(struct lpfc_hba
*phba
)
9432 struct lpfc_register portsmphr_reg
, uerrlo_reg
, uerrhi_reg
;
9433 struct lpfc_register reg_data
;
9434 int i
, port_error
= 0;
9437 memset(&portsmphr_reg
, 0, sizeof(portsmphr_reg
));
9438 memset(®_data
, 0, sizeof(reg_data
));
9439 if (!phba
->sli4_hba
.PSMPHRregaddr
)
9442 /* Wait up to 30 seconds for the SLI Port POST done and ready */
9443 for (i
= 0; i
< 3000; i
++) {
9444 if (lpfc_readl(phba
->sli4_hba
.PSMPHRregaddr
,
9445 &portsmphr_reg
.word0
) ||
9446 (bf_get(lpfc_port_smphr_perr
, &portsmphr_reg
))) {
9447 /* Port has a fatal POST error, break out */
9448 port_error
= -ENODEV
;
9451 if (LPFC_POST_STAGE_PORT_READY
==
9452 bf_get(lpfc_port_smphr_port_status
, &portsmphr_reg
))
9458 * If there was a port error during POST, then don't proceed with
9459 * other register reads as the data may not be valid. Just exit.
9462 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9463 "1408 Port Failed POST - portsmphr=0x%x, "
9464 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9465 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9466 portsmphr_reg
.word0
,
9467 bf_get(lpfc_port_smphr_perr
, &portsmphr_reg
),
9468 bf_get(lpfc_port_smphr_sfi
, &portsmphr_reg
),
9469 bf_get(lpfc_port_smphr_nip
, &portsmphr_reg
),
9470 bf_get(lpfc_port_smphr_ipc
, &portsmphr_reg
),
9471 bf_get(lpfc_port_smphr_scr1
, &portsmphr_reg
),
9472 bf_get(lpfc_port_smphr_scr2
, &portsmphr_reg
),
9473 bf_get(lpfc_port_smphr_host_scratch
, &portsmphr_reg
),
9474 bf_get(lpfc_port_smphr_port_status
, &portsmphr_reg
));
9476 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
9477 "2534 Device Info: SLIFamily=0x%x, "
9478 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9479 "SLIHint_2=0x%x, FT=0x%x\n",
9480 bf_get(lpfc_sli_intf_sli_family
,
9481 &phba
->sli4_hba
.sli_intf
),
9482 bf_get(lpfc_sli_intf_slirev
,
9483 &phba
->sli4_hba
.sli_intf
),
9484 bf_get(lpfc_sli_intf_if_type
,
9485 &phba
->sli4_hba
.sli_intf
),
9486 bf_get(lpfc_sli_intf_sli_hint1
,
9487 &phba
->sli4_hba
.sli_intf
),
9488 bf_get(lpfc_sli_intf_sli_hint2
,
9489 &phba
->sli4_hba
.sli_intf
),
9490 bf_get(lpfc_sli_intf_func_type
,
9491 &phba
->sli4_hba
.sli_intf
));
9493 * Check for other Port errors during the initialization
9494 * process. Fail the load if the port did not come up
9497 if_type
= bf_get(lpfc_sli_intf_if_type
,
9498 &phba
->sli4_hba
.sli_intf
);
9500 case LPFC_SLI_INTF_IF_TYPE_0
:
9501 phba
->sli4_hba
.ue_mask_lo
=
9502 readl(phba
->sli4_hba
.u
.if_type0
.UEMASKLOregaddr
);
9503 phba
->sli4_hba
.ue_mask_hi
=
9504 readl(phba
->sli4_hba
.u
.if_type0
.UEMASKHIregaddr
);
9506 readl(phba
->sli4_hba
.u
.if_type0
.UERRLOregaddr
);
9508 readl(phba
->sli4_hba
.u
.if_type0
.UERRHIregaddr
);
9509 if ((~phba
->sli4_hba
.ue_mask_lo
& uerrlo_reg
.word0
) ||
9510 (~phba
->sli4_hba
.ue_mask_hi
& uerrhi_reg
.word0
)) {
9511 lpfc_printf_log(phba
, KERN_ERR
,
9513 "1422 Unrecoverable Error "
9514 "Detected during POST "
9515 "uerr_lo_reg=0x%x, "
9516 "uerr_hi_reg=0x%x, "
9517 "ue_mask_lo_reg=0x%x, "
9518 "ue_mask_hi_reg=0x%x\n",
9521 phba
->sli4_hba
.ue_mask_lo
,
9522 phba
->sli4_hba
.ue_mask_hi
);
9523 port_error
= -ENODEV
;
9526 case LPFC_SLI_INTF_IF_TYPE_2
:
9527 case LPFC_SLI_INTF_IF_TYPE_6
:
9528 /* Final checks. The port status should be clean. */
9529 if (lpfc_readl(phba
->sli4_hba
.u
.if_type2
.STATUSregaddr
,
9531 lpfc_sli4_unrecoverable_port(®_data
)) {
9532 phba
->work_status
[0] =
9533 readl(phba
->sli4_hba
.u
.if_type2
.
9535 phba
->work_status
[1] =
9536 readl(phba
->sli4_hba
.u
.if_type2
.
9538 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9539 "2888 Unrecoverable port error "
9540 "following POST: port status reg "
9541 "0x%x, port_smphr reg 0x%x, "
9542 "error 1=0x%x, error 2=0x%x\n",
9544 portsmphr_reg
.word0
,
9545 phba
->work_status
[0],
9546 phba
->work_status
[1]);
9547 port_error
= -ENODEV
;
9551 if (lpfc_pldv_detect
&&
9552 bf_get(lpfc_sli_intf_sli_family
,
9553 &phba
->sli4_hba
.sli_intf
) ==
9554 LPFC_SLI_INTF_FAMILY_G6
)
9555 pci_write_config_byte(phba
->pcidev
,
9556 LPFC_SLI_INTF
, CFG_PLD
);
9558 case LPFC_SLI_INTF_IF_TYPE_1
:
9567 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9568 * @phba: pointer to lpfc hba data structure.
9569 * @if_type: The SLI4 interface type getting configured.
9571 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9575 lpfc_sli4_bar0_register_memmap(struct lpfc_hba
*phba
, uint32_t if_type
)
9578 case LPFC_SLI_INTF_IF_TYPE_0
:
9579 phba
->sli4_hba
.u
.if_type0
.UERRLOregaddr
=
9580 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_UERR_STATUS_LO
;
9581 phba
->sli4_hba
.u
.if_type0
.UERRHIregaddr
=
9582 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_UERR_STATUS_HI
;
9583 phba
->sli4_hba
.u
.if_type0
.UEMASKLOregaddr
=
9584 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_UE_MASK_LO
;
9585 phba
->sli4_hba
.u
.if_type0
.UEMASKHIregaddr
=
9586 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_UE_MASK_HI
;
9587 phba
->sli4_hba
.SLIINTFregaddr
=
9588 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_SLI_INTF
;
9590 case LPFC_SLI_INTF_IF_TYPE_2
:
9591 phba
->sli4_hba
.u
.if_type2
.EQDregaddr
=
9592 phba
->sli4_hba
.conf_regs_memmap_p
+
9593 LPFC_CTL_PORT_EQ_DELAY_OFFSET
;
9594 phba
->sli4_hba
.u
.if_type2
.ERR1regaddr
=
9595 phba
->sli4_hba
.conf_regs_memmap_p
+
9596 LPFC_CTL_PORT_ER1_OFFSET
;
9597 phba
->sli4_hba
.u
.if_type2
.ERR2regaddr
=
9598 phba
->sli4_hba
.conf_regs_memmap_p
+
9599 LPFC_CTL_PORT_ER2_OFFSET
;
9600 phba
->sli4_hba
.u
.if_type2
.CTRLregaddr
=
9601 phba
->sli4_hba
.conf_regs_memmap_p
+
9602 LPFC_CTL_PORT_CTL_OFFSET
;
9603 phba
->sli4_hba
.u
.if_type2
.STATUSregaddr
=
9604 phba
->sli4_hba
.conf_regs_memmap_p
+
9605 LPFC_CTL_PORT_STA_OFFSET
;
9606 phba
->sli4_hba
.SLIINTFregaddr
=
9607 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_SLI_INTF
;
9608 phba
->sli4_hba
.PSMPHRregaddr
=
9609 phba
->sli4_hba
.conf_regs_memmap_p
+
9610 LPFC_CTL_PORT_SEM_OFFSET
;
9611 phba
->sli4_hba
.RQDBregaddr
=
9612 phba
->sli4_hba
.conf_regs_memmap_p
+
9613 LPFC_ULP0_RQ_DOORBELL
;
9614 phba
->sli4_hba
.WQDBregaddr
=
9615 phba
->sli4_hba
.conf_regs_memmap_p
+
9616 LPFC_ULP0_WQ_DOORBELL
;
9617 phba
->sli4_hba
.CQDBregaddr
=
9618 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_EQCQ_DOORBELL
;
9619 phba
->sli4_hba
.EQDBregaddr
= phba
->sli4_hba
.CQDBregaddr
;
9620 phba
->sli4_hba
.MQDBregaddr
=
9621 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_MQ_DOORBELL
;
9622 phba
->sli4_hba
.BMBXregaddr
=
9623 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_BMBX
;
9625 case LPFC_SLI_INTF_IF_TYPE_6
:
9626 phba
->sli4_hba
.u
.if_type2
.EQDregaddr
=
9627 phba
->sli4_hba
.conf_regs_memmap_p
+
9628 LPFC_CTL_PORT_EQ_DELAY_OFFSET
;
9629 phba
->sli4_hba
.u
.if_type2
.ERR1regaddr
=
9630 phba
->sli4_hba
.conf_regs_memmap_p
+
9631 LPFC_CTL_PORT_ER1_OFFSET
;
9632 phba
->sli4_hba
.u
.if_type2
.ERR2regaddr
=
9633 phba
->sli4_hba
.conf_regs_memmap_p
+
9634 LPFC_CTL_PORT_ER2_OFFSET
;
9635 phba
->sli4_hba
.u
.if_type2
.CTRLregaddr
=
9636 phba
->sli4_hba
.conf_regs_memmap_p
+
9637 LPFC_CTL_PORT_CTL_OFFSET
;
9638 phba
->sli4_hba
.u
.if_type2
.STATUSregaddr
=
9639 phba
->sli4_hba
.conf_regs_memmap_p
+
9640 LPFC_CTL_PORT_STA_OFFSET
;
9641 phba
->sli4_hba
.PSMPHRregaddr
=
9642 phba
->sli4_hba
.conf_regs_memmap_p
+
9643 LPFC_CTL_PORT_SEM_OFFSET
;
9644 phba
->sli4_hba
.BMBXregaddr
=
9645 phba
->sli4_hba
.conf_regs_memmap_p
+ LPFC_BMBX
;
9647 case LPFC_SLI_INTF_IF_TYPE_1
:
9649 dev_printk(KERN_ERR
, &phba
->pcidev
->dev
,
9650 "FATAL - unsupported SLI4 interface type - %d\n",
9657 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9658 * @phba: pointer to lpfc hba data structure.
9659 * @if_type: sli if type to operate on.
9661 * This routine is invoked to set up SLI4 BAR1 register memory map.
9664 lpfc_sli4_bar1_register_memmap(struct lpfc_hba
*phba
, uint32_t if_type
)
9667 case LPFC_SLI_INTF_IF_TYPE_0
:
9668 phba
->sli4_hba
.PSMPHRregaddr
=
9669 phba
->sli4_hba
.ctrl_regs_memmap_p
+
9670 LPFC_SLIPORT_IF0_SMPHR
;
9671 phba
->sli4_hba
.ISRregaddr
= phba
->sli4_hba
.ctrl_regs_memmap_p
+
9673 phba
->sli4_hba
.IMRregaddr
= phba
->sli4_hba
.ctrl_regs_memmap_p
+
9675 phba
->sli4_hba
.ISCRregaddr
= phba
->sli4_hba
.ctrl_regs_memmap_p
+
9678 case LPFC_SLI_INTF_IF_TYPE_6
:
9679 phba
->sli4_hba
.RQDBregaddr
= phba
->sli4_hba
.drbl_regs_memmap_p
+
9680 LPFC_IF6_RQ_DOORBELL
;
9681 phba
->sli4_hba
.WQDBregaddr
= phba
->sli4_hba
.drbl_regs_memmap_p
+
9682 LPFC_IF6_WQ_DOORBELL
;
9683 phba
->sli4_hba
.CQDBregaddr
= phba
->sli4_hba
.drbl_regs_memmap_p
+
9684 LPFC_IF6_CQ_DOORBELL
;
9685 phba
->sli4_hba
.EQDBregaddr
= phba
->sli4_hba
.drbl_regs_memmap_p
+
9686 LPFC_IF6_EQ_DOORBELL
;
9687 phba
->sli4_hba
.MQDBregaddr
= phba
->sli4_hba
.drbl_regs_memmap_p
+
9688 LPFC_IF6_MQ_DOORBELL
;
9690 case LPFC_SLI_INTF_IF_TYPE_2
:
9691 case LPFC_SLI_INTF_IF_TYPE_1
:
9693 dev_err(&phba
->pcidev
->dev
,
9694 "FATAL - unsupported SLI4 interface type - %d\n",
9701 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9702 * @phba: pointer to lpfc hba data structure.
9703 * @vf: virtual function number
9705 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
9708 * Return 0 if successful, otherwise -ENODEV.
9711 lpfc_sli4_bar2_register_memmap(struct lpfc_hba
*phba
, uint32_t vf
)
9713 if (vf
> LPFC_VIR_FUNC_MAX
)
9716 phba
->sli4_hba
.RQDBregaddr
= (phba
->sli4_hba
.drbl_regs_memmap_p
+
9717 vf
* LPFC_VFR_PAGE_SIZE
+
9718 LPFC_ULP0_RQ_DOORBELL
);
9719 phba
->sli4_hba
.WQDBregaddr
= (phba
->sli4_hba
.drbl_regs_memmap_p
+
9720 vf
* LPFC_VFR_PAGE_SIZE
+
9721 LPFC_ULP0_WQ_DOORBELL
);
9722 phba
->sli4_hba
.CQDBregaddr
= (phba
->sli4_hba
.drbl_regs_memmap_p
+
9723 vf
* LPFC_VFR_PAGE_SIZE
+
9724 LPFC_EQCQ_DOORBELL
);
9725 phba
->sli4_hba
.EQDBregaddr
= phba
->sli4_hba
.CQDBregaddr
;
9726 phba
->sli4_hba
.MQDBregaddr
= (phba
->sli4_hba
.drbl_regs_memmap_p
+
9727 vf
* LPFC_VFR_PAGE_SIZE
+ LPFC_MQ_DOORBELL
);
9728 phba
->sli4_hba
.BMBXregaddr
= (phba
->sli4_hba
.drbl_regs_memmap_p
+
9729 vf
* LPFC_VFR_PAGE_SIZE
+ LPFC_BMBX
);
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
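/*
 * Note added for clarity (not in the original source): because aphys is
 * 16-byte aligned, its low four bits are zero; bits [33:4] of the physical
 * address end up in addr_lo and bits [63:34] in addr_hi, each shifted left
 * by two so the bottom bits can carry the marker that distinguishes the
 * high and low halves when they are written to the bootstrap mailbox
 * register.
 */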
/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
static const char * const lpfc_topo_to_str[] = {
	"Loop then P2P",
	"Loopback",
	"P2P Only",
	"Unsupported",
	"Loop Only",
	"Unsupported",
	"P2P then Loop",
};

#define	LINK_FLAGS_DEF	0x0
#define	LINK_FLAGS_P2P	0x1
#define	LINK_FLAGS_LOOP	0x2
/**
 * lpfc_map_topology - Map the topology read from READ_CONFIG
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to read config data
 *
 * This routine is invoked to map the topology values as read
 * from the read config mailbox command. If the persistent
 * topology feature is supported, the firmware will provide the
 * saved topology information to be used in INIT_LINK
 **/
static void
lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
{
	u8 ptv, tf, pt;

	ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
	tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
	pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
			ptv, tf, pt);
	if (!ptv) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2019 FW does not support persistent topology "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
		return;
	}
	/* FW supports persistent topology - override module parameter value */
	set_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);

	/* if ASIC_GEN_NUM >= 0xC) */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_6) ||
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_FAMILY_G6)) {
		if (!tf)
			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
					? FLAGS_TOPOLOGY_MODE_LOOP
					: FLAGS_TOPOLOGY_MODE_PT_PT);
		else
			clear_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);
	} else { /* G5 */
		if (tf)
			/* If topology failover set - pt is '0' or '1' */
			phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
					      FLAGS_TOPOLOGY_MODE_LOOP_PT);
		else
			phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
					? FLAGS_TOPOLOGY_MODE_PT_PT
					: FLAGS_TOPOLOGY_MODE_LOOP);
	}
	if (test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag))
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2020 Using persistent topology value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2021 Invalid topology values from FW "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type, qmin, fawwpn;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2012 Mailbox failed , mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
		if (fawwpn) {
			lpfc_printf_log(phba, KERN_INFO,
					LOG_INIT | LOG_DISCOVERY,
					"2702 READ_CONFIG: FA-PWWN is "
					"configured on\n");
			phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
		} else {
			/* Clear FW configured flag, preserve driver flag */
			phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
		}

		phba->sli4_hba.conf_trunk =
			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);

		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		/* Reduce resource usage in kdump environment */
		if (is_kdump_kernel() &&
		    phba->sli4_hba.max_cfg_param.max_xri > 512)
			phba->sli4_hba.max_cfg_param.max_xri = 512;
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* Limit the max we support */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;

		/* Next decide on FPIN or Signal E2E CGN support
		 * For congestion alarms and warnings valid combination are:
		 * 1. FPIN alarms / FPIN warnings
		 * 2. Signal alarms / Signal warnings
		 * 3. FPIN alarms / Signal warnings
		 * 4. Signal alarms / FPIN warnings
		 *
		 * Initialize the adapter frequency to 100 mSecs
		 */
		phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
		phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;

		if (lpfc_use_cgn_signal) {
			if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
				phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
				phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
			}
			if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
				/* MUST support both alarm and warning
				 * because EDC does not support alarm alone.
				 */
				if (phba->cgn_reg_signal !=
				    EDC_CG_SIG_WARN_ONLY) {
					/* Must support both or none */
					phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
					phba->cgn_reg_signal =
						EDC_CG_SIG_NOTSUPPORTED;
				} else {
					phba->cgn_reg_signal =
						EDC_CG_SIG_WARN_ALARM;
					phba->cgn_reg_fpin =
						LPFC_CGN_FPIN_NONE;
				}
			}
		}

		/* Set the congestion initial signal and fpin values. */
		phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
		phba->cgn_init_reg_signal = phba->cgn_reg_signal;

		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
				phba->cgn_reg_signal, phba->cgn_reg_fpin);

		lpfc_map_topology(phba, rd_config);
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq,
				phba->lmt);

		/*
		 * Calculate queue resources based on how
		 * many WQ/CQ/EQs are available.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		/*
		 * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and
		 * the remainder can be used for NVME / FCP.
		 */
		qmin -= 4;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;

		/* Check to see if there is enough for default cfg */
		if ((phba->cfg_irq_chann > qmin) ||
		    (phba->cfg_hdw_queue > qmin)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2005 Reducing Queues - "
					"FW resource limitation: "
					"WQ %d CQ %d EQ %d: min %d: "
					"IRQ %d HDWQ %d\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->sli4_hba.max_cfg_param.max_eq,
					qmin, phba->cfg_irq_chann,
					phba->cfg_hdw_queue);

			if (phba->cfg_irq_chann > qmin)
				phba->cfg_irq_chann = qmin;
			if (phba->cfg_hdw_queue > qmin)
				phba->cfg_hdw_queue = qmin;
		}
	}

	if (rc)
		goto read_cfg_out;

	/* Update link speed if forced link speed is supported */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			set_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag);

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case LINK_SPEED_AUTO:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0. This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}

/**
 * lpfc_sli4_queue_verify - Verify and update EQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs.
 * After this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	if (phba->nvmet_support) {
		if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
			phba->cfg_hdw_queue, phba->cfg_irq_chann,
			phba->cfg_nvmet_mrq);

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
	return 0;
}

static int
lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
{
	struct lpfc_queue *qdesc;
	u32 wqesize;
	int cpu;

	cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
	/* Create Fast Path IO CQs */
	if (phba->enab_exp_wqcq_pages)
		/* Increase the CQ size when WQEs contain an embedded cdb */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      LPFC_CQE_EXP_COUNT, cpu);

	else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0499 Failed allocate fast-path IO CQ (%d)\n",
				idx);
		return 1;
	}
	qdesc->qe_valid = 1;
	qdesc->hdwq = idx;
	qdesc->chann = cpu;
	phba->sli4_hba.hdwq[idx].io_cq = qdesc;

	/* Create Fast Path IO WQs */
	if (phba->enab_exp_wqcq_pages) {
		/* Increase the WQ size when WQEs contain an embedded cdb */
		wqesize = (phba->fcp_embed_io) ?
			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      wqesize,
					      LPFC_WQE_EXP_COUNT, cpu);
	} else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);

	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0503 Failed allocate fast-path IO WQ (%d)\n",
				idx);
		return 1;
	}
	qdesc->hdwq = idx;
	qdesc->chann = cpu;
	phba->sli4_hba.hdwq[idx].io_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	return 0;
}

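/*
 * lpfc_alloc_io_wq_cq() is called once per hardware-queue index from
 * lpfc_sli4_queue_create() below, so every hdwq ends up with its own
 * fast-path IO CQ/WQ pair; a non-zero return tells the caller the
 * allocation failed.
 */
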
/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx, cpu, eqcpu;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *eqcpup;
	struct lpfc_eq_intr_info *eqi;
	u32 wqesize;

	/*
	 * Create HBA Record arrays.
	 * Both NVME and FCP will share that same vectors / EQs
	 */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	if (!phba->sli4_hba.hdwq) {
		phba->sli4_hba.hdwq = kcalloc(
			phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
			GFP_KERNEL);
		if (!phba->sli4_hba.hdwq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6427 Failed allocate memory for "
					"fast-path Hardware Queue array\n");
			goto out_error;
		}
		/* Prepare hardware queues to take IO buffers */
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			spin_lock_init(&qp->io_buf_list_get_lock);
			spin_lock_init(&qp->io_buf_list_put_lock);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->get_io_bufs = 0;
			qp->put_io_bufs = 0;
			qp->total_io_bufs = 0;
			spin_lock_init(&qp->abts_io_buf_list_lock);
			INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
			qp->abts_scsi_io_bufs = 0;
			qp->abts_nvme_io_bufs = 0;
			INIT_LIST_HEAD(&qp->sgl_list);
			INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
			spin_lock_init(&qp->hdwq_lock);
		}
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->nvmet_support) {
			phba->sli4_hba.nvmet_cqset = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_cqset) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3121 Fail allocate memory for "
					"fast-path CQ set array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_hdr) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3122 Fail allocate memory for "
					"fast-path RQ set hdr array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_data = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3124 Fail allocate memory for "
					"fast-path RQ set data array\n");
				goto out_error;
			}
		}
	}

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Create HBA Event Queues (EQs) */
	for_each_present_cpu(cpu) {
		/* We only want to create 1 EQ per vector, even though
		 * multiple CPUs might be using that vector. so only
		 * selects the CPUs that are LPFC_CPU_FIRST_IRQ.
		 */
		cpup = &phba->sli4_hba.cpu_map[cpu];
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* Get a ptr to the Hardware Queue associated with this CPU */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];

		/* Allocate an EQ */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0497 Failed allocate EQ (%d)\n",
					cpup->hdwq);
			goto out_error;
		}
		qdesc->qe_valid = 1;
		qdesc->hdwq = cpup->hdwq;
		qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
		qdesc->last_cpu = qdesc->chann;

		/* Save the allocated EQ in the Hardware Queue */
		qp->hba_eq = qdesc;

		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
		list_add(&qdesc->cpu_list, &eqi->list);
	}

	/* Now we need to populate the other Hardware Queues, that share
	 * an IRQ vector, with the associated EQ ptr.
	 */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Check for EQ already allocated in previous loop */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* Check for multiple CPUs per hdwq */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
		if (qp->hba_eq)
			continue;

		/* We need to share an EQ for this hdwq */
		eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
		qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
	}

	/* Allocate IO Path SLI4 CQ/WQs */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		if (lpfc_alloc_io_wq_cq(phba, idx))
			goto out_error;
	}

	if (phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3142 Failed allocate NVME "
						"CQ Set (%d)\n", idx);
				goto out_error;
			}
			qdesc->qe_valid = 1;
			qdesc->hdwq = idx;
			qdesc->chann = cpu;
			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
		}
	}

	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	qdesc->chann = cpu;
	phba->sli4_hba.els_cq = qdesc;

	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */

	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/*
	 * Create slow-path ELS Work Queue.
	 * Increase the ELS WQ size when WQEs contain an embedded cdb
	 */
	wqesize = (phba->fcp_embed_io) ?
		LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;

	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      wqesize,
				      phba->sli4_hba.wq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.els_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Create NVME LS Complete Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6079 Failed allocate NVME LS CQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		qdesc->qe_valid = 1;
		phba->sli4_hba.nvmels_cq = qdesc;

		/* Create NVME LS Work Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6080 Failed allocate NVME LS WQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		phba->sli4_hba.nvmels_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}

	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
	    phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			/* Create NVMET Receive Queue for header */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3146 Failed allocate "
						"receive HRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;

			/* Only needed for header of RQ pair */
			qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
						   GFP_KERNEL,
						   cpu_to_node(cpu));
			if (qdesc->rqbp == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6131 Failed allocate "
						"Header RQBP\n");
				goto out_error;
			}

			/* Put list in known state in case driver load fails. */
			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);

			/* Create NVMET Receive Queue for data */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3156 Failed allocate "
						"receive DRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
		}
	}

	/* Clear NVME stats */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
		}
	}

	/* Clear SCSI stats */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
		}
	}

	return 0;

out_error:
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}

static inline void
__lpfc_sli4_release_queue(struct lpfc_queue **qp)
{
	if (*qp != NULL) {
		lpfc_sli4_queue_free(*qp);
		*qp = NULL;
	}
}

static inline void
lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
{
	int idx;

	if (*qs == NULL)
		return;

	for (idx = 0; idx < max; idx++)
		__lpfc_sli4_release_queue(&(*qs)[idx]);

	kfree(*qs);
	*qs = NULL;
}

static void
lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *eq;
	int idx;

	hdwq = phba->sli4_hba.hdwq;

	/* Loop thru all Hardware Queues */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		/* Free the CQ/WQ corresponding to the Hardware Queue */
		lpfc_sli4_queue_free(hdwq[idx].io_cq);
		lpfc_sli4_queue_free(hdwq[idx].io_wq);
		hdwq[idx].hba_eq = NULL;
		hdwq[idx].io_cq = NULL;
		hdwq[idx].io_wq = NULL;
		if (phba->cfg_xpsgl && !phba->nvmet_support)
			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
	}
	/* Loop thru all IRQ vectors */
	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		/* Free the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		lpfc_sli4_queue_free(eq);
		phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
	}
}

/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	/*
	 * Set FREE_INIT before beginning to free the queues.
	 * Wait until the users of queues to acknowledge to
	 * release queues by clearing FREE_WAIT.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli4_cleanup_poll_list(phba);

	/* Release HBA eqs */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on this list has been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Done with freeing the queues */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_dmabuf *h_buf;
	struct rqb_dmabuf *rqb_buffer;

	rqbp = rq->rqbp;
	while (!list_empty(&rqbp->rqb_buffer_list)) {
		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
				 struct lpfc_dmabuf, list);

		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
		rqbp->buffer_count--;
	}
}

static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		  struct lpfc_queue *cq, struct lpfc_queue *wq,
		  uint16_t *cq_map, int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6085 Fast-path %s (%d) not allocated\n",
				((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* create the Cq first */
	rc = lpfc_cq_create(phba, cq, eq,
			    (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6086 Failed setup of CQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
		return rc;
	}

	if (qtype != LPFC_MBOX) {
		/* Setup cq_map for fast lookup */
		if (cq_map)
			*cq_map = cq->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
			qidx, cq->queue_id, qidx, eq->queue_id);

		/* create the wq */
		rc = lpfc_wq_create(phba, wq, cq, qtype);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		/* Bind this CQ/WQ to the NVME ring */
		pring = wq->pring;
		pring->sli.sli4.wqp = (void *)wq;
		cq->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
	} else {
		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0539 Failed setup of slow-path MQ: "
					"rc = 0x%x\n", rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);
	}

	return 0;
}

/**
 * lpfc_setup_cq_lookup - Setup the CQ lookup table
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will populate the cq_lookup table by all
 * available CQ queue_id's.
 **/
static void
lpfc_setup_cq_lookup(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *childq;
	int qidx;

	memset(phba->sli4_hba.cq_lookup, 0,
	       (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
	/* Loop thru all IRQ vectors */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		/* Loop through all CQs associated with that EQ */
		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id > phba->sli4_hba.cq_max)
				continue;
			if (childq->subtype == LPFC_IO)
				phba->sli4_hba.cq_lookup[childq->queue_id] =
					childq;
		}
	}
}

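/*
 * cq_lookup is indexed by CQ queue_id and only stores the fast-path IO
 * CQs (LPFC_IO subtype), so interrupt-time EQ processing can resolve a
 * CQE's id straight to its lpfc_queue without walking the EQ child list.
 */
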
/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_sli4_hdw_queue *qp;
	LPFC_MBOXQ_t *mboxq;
	int qidx, cpu;
	uint32_t length, usdelay;
	int rc = -ENOMEM;

	/* Check for dual-ULP support */
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3249 Unable to allocate memory for "
				"QUERY_FW_CFG mailbox command\n");
		return -ENOMEM;
	}
	length = (sizeof(struct lpfc_mbx_query_fw_config) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3250 QUERY_FW_CFG mailbox failed with status "
				"x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -ENXIO;
		goto out_error;
	}

	phba->sli4_hba.fw_func_mode =
			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
	phba->sli4_hba.physical_port =
			mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);

	mempool_free(mboxq, phba->mbox_mem_pool);

	/*
	 * Set up HBA Event Queues (EQs)
	 */
	qp = phba->sli4_hba.hdwq;

	/* Set up HBA event queue */
	if (!qp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_error;
	}

	/* Loop thru all IRQ vectors */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		/* Create HBA Event Queues (EQs) in order */
		for_each_present_cpu(cpu) {
			cpup = &phba->sli4_hba.cpu_map[cpu];

			/* Look for the CPU thats using that vector with
			 * LPFC_CPU_FIRST_IRQ set.
			 */
			if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
				continue;
			if (qidx != cpup->eq)
				continue;

			/* Create an EQ for that vector */
			rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
					    phba->cfg_fcp_imax);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"0523 Failed setup of fast-path"
						" EQ (%d), rc = 0x%x\n",
						cpup->eq, (uint32_t)rc);
				goto out_destroy;
			}

			/* Save the EQ for that vector in the hba_eq_hdl */
			phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
				qp[cpup->hdwq].hba_eq;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2584 HBA EQ setup: queue[%d]-id=%d\n",
					cpup->eq,
					qp[cpup->hdwq].hba_eq->queue_id);
		}
	}

	/* Loop thru all Hardware Queues */
	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Create the CQ/WQ corresponding to the Hardware Queue */
		rc = lpfc_create_wq_cq(phba,
				       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
				       phba->sli4_hba.hdwq[qidx].io_cq,
				       phba->sli4_hba.hdwq[qidx].io_wq,
				       &phba->sli4_hba.hdwq[qidx].io_cq_map,
				       qidx,
				       LPFC_IO);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0535 Failed to setup fastpath "
					"IO WQ/CQ (%d), rc = 0x%x\n",
					qidx, (uint32_t)rc);
			goto out_destroy;
		}
	}

	/*
	 * Set up Slow Path Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX CQ/MQ */

	if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0528 %s not allocated\n",
				phba->sli4_hba.mbx_cq ?
				"Mailbox WQ" : "Mailbox CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
			       phba->sli4_hba.mbx_cq,
			       phba->sli4_hba.mbx_wq,
			       NULL, 0, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
			(uint32_t)rc);
		goto out_destroy;
	}
	if (phba->nvmet_support) {
		if (!phba->sli4_hba.nvmet_cqset) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3165 Fast-path NVME CQ Set "
					"array not allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_cq_create_set(phba,
					phba->sli4_hba.nvmet_cqset,
					qp,
					LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3164 Failed setup of NVME CQ "
						"Set, rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}
		} else {
			/* Set up NVMET Receive Complete Queue */
			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
					    qp[0].hba_eq,
					    LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6089 Failed setup NVMET CQ: "
						"rc = 0x%x\n", (uint32_t)rc);
				goto out_destroy;
			}
			phba->sli4_hba.nvmet_cqset[0]->chann = 0;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"6090 NVMET CQ setup: cq-id=%d, "
					"parent eq-id=%d\n",
					phba->sli4_hba.nvmet_cqset[0]->queue_id,
					qp[0].hba_eq->queue_id);
		}
	}

	/* Set up slow-path ELS WQ/CQ */
	if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0530 ELS %s not allocated\n",
				phba->sli4_hba.els_cq ? "WQ" : "CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}
	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
			       phba->sli4_hba.els_cq,
			       phba->sli4_hba.els_wq,
			       NULL, 0, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
				(uint32_t)rc);
		goto out_destroy;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Set up NVME LS Complete Queue */
		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6091 LS %s not allocated\n",
					phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}
		rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
				       phba->sli4_hba.nvmels_cq,
				       phba->sli4_hba.nvmels_wq,
				       NULL, 0, LPFC_NVME_LS);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0526 Failed setup of NVVME LS WQ/CQ: "
					"rc = 0x%x\n", (uint32_t)rc);
			goto out_destroy;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"6096 ELS WQ setup: wq-id=%d, "
				"parent cq-id=%d\n",
				phba->sli4_hba.nvmels_wq->queue_id,
				phba->sli4_hba.nvmels_cq->queue_id);
	}

	/*
	 * Create NVMET Receive Queue (RQ)
	 */
	if (phba->nvmet_support) {
		if ((!phba->sli4_hba.nvmet_cqset) ||
		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
		    (!phba->sli4_hba.nvmet_mrq_data)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6130 MRQ CQ Queues not "
					"allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_mrq_create(phba,
					     phba->sli4_hba.nvmet_mrq_hdr,
					     phba->sli4_hba.nvmet_mrq_data,
					     phba->sli4_hba.nvmet_cqset,
					     LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6098 Failed setup of NVMET "
						"MRQ: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

		} else {
			rc = lpfc_rq_create(phba,
					    phba->sli4_hba.nvmet_mrq_hdr[0],
					    phba->sli4_hba.nvmet_mrq_data[0],
					    phba->sli4_hba.nvmet_cqset[0],
					    LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6057 Failed setup of NVMET "
						"Receive Queue: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

			lpfc_printf_log(
				phba, KERN_INFO, LOG_INIT,
				"6099 NVMET RQ setup: hdr-rq-id=%d, "
				"dat-rq-id=%d parent cq-id=%d\n",
				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
				phba->sli4_hba.nvmet_cqset[0]->queue_id);

		}
	}

	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_fcp_imax)
		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
	else
		usdelay = 0;

	for (qidx = 0; qidx < phba->cfg_irq_chann;
	     qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
					 usdelay);

	if (phba->sli4_hba.cq_max) {
		kfree(phba->sli4_hba.cq_lookup);
		phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
			sizeof(struct lpfc_queue *), GFP_KERNEL);
		if (!phba->sli4_hba.cq_lookup) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0549 Failed setup of CQ Lookup table: "
					"size 0x%x\n", phba->sli4_hba.cq_max);
			rc = -ENOMEM;
			goto out_destroy;
		}
		lpfc_setup_cq_lookup(phba);
	}
	return 0;

out_destroy:
	lpfc_sli4_queue_unset(phba);
out_error:
	return rc;
}

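/*
 * Interrupt coalescing note for lpfc_sli4_queue_setup(): usdelay is
 * derived from the cfg_fcp_imax interrupts-per-second parameter
 * (LPFC_SEC_TO_USEC / cfg_fcp_imax), and lpfc_modify_hba_eq_delay()
 * applies it to the EQs in groups of LPFC_MAX_EQ_DELAY_EQID_CNT ids
 * per call.
 */
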
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;
	int qidx;

	/* Unset mailbox command work queue */
	if (phba->sli4_hba.mbx_wq)
		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Unset NVME LS work queue */
	if (phba->sli4_hba.nvmels_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);

	/* Unset ELS work queue */
	if (phba->sli4_hba.els_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);

	/* Unset unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq)
		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
				phba->sli4_hba.dat_rq);

	/* Unset mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);

	/* Unset ELS complete queue */
	if (phba->sli4_hba.els_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);

	/* Unset NVME LS complete queue */
	if (phba->sli4_hba.nvmels_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);

	if (phba->nvmet_support) {
		/* Unset NVMET MRQ queue */
		if (phba->sli4_hba.nvmet_mrq_hdr) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_rq_destroy(
					phba,
					phba->sli4_hba.nvmet_mrq_hdr[qidx],
					phba->sli4_hba.nvmet_mrq_data[qidx]);
		}

		/* Unset NVMET CQ Set complete queue */
		if (phba->sli4_hba.nvmet_cqset) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_cq_destroy(
					phba, phba->sli4_hba.nvmet_cqset[qidx]);
		}
	}

	/* Unset fast-path SLI4 queues */
	if (phba->sli4_hba.hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			/* Destroy the CQ/WQ corresponding to Hardware Queue */
			qp = &phba->sli4_hba.hdwq[qidx];
			lpfc_wq_destroy(phba, qp->io_wq);
			lpfc_cq_destroy(phba, qp->io_cq);
		}
		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			/* Destroy the EQ corresponding to the IRQ vector */
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			lpfc_eq_destroy(phba, eq);
		}
	}

	kfree(phba->sli4_hba.cq_lookup);
	phba->sli4_hba.cq_lookup = NULL;
	phba->sli4_hba.cq_max = 0;
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * CQE. For now, this pool is used for the interrupt service routine to queue
 * the following HBA completion queue events for the worker thread to process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that, it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

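/*
 * Pairing note: the __lpfc_sli4_cq_event_alloc() and
 * __lpfc_sli4_cq_event_release() variants above assume the caller
 * already holds phba->hbalock; the non-underscore wrappers take and
 * release that lock themselves.
 */
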
/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the pending completion-queue events back
 * into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cq_event_list);
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */

	/* Pending ELS XRI abort events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);

	/* Pending async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	while (!list_empty(&cq_event_list)) {
		list_remove_head(&cq_event_list, cq_event,
				 struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It will destroy
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - No available memory
 *   -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk;
	uint32_t port_reset = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
wait:
		/*
		 * Poll the Port Status Register and wait for RDY for
		 * up to 30 seconds. If the port doesn't respond, treat
		 * it as an error.
		 */
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}

		if (bf_get(lpfc_sliport_status_pldv, &reg_data))
			lpfc_pldv_detect = true;

		if (!port_reset) {
			/*
			 * Reset the port now
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			rc = -ENODEV;
			goto out;
		}
		break;

	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3317 HBA not functional: IP Reset Failed "
				"try: echo fw_reset > board_mode\n");
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error;
	uint32_t if_type;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return -ENODEV;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return -ENODEV;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * address.
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			return -ENODEV;
		}
		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			return -ENODEV;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			return -ENODEV;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
		if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
			/*
			 * Map SLI4 if type 0 HBA Control Register base to a
			 * kernel virtual address and setup the registers.
			 */
			phba->pci_bar1_map = pci_resource_start(pdev,
								PCI_64BIT_BAR2);
			bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
			phba->sli4_hba.ctrl_regs_memmap_p =
				ioremap(phba->pci_bar1_map,
					bar1map_len);
			if (!phba->sli4_hba.ctrl_regs_memmap_p) {
				dev_err(&pdev->dev,
					"ioremap failed for SLI4 HBA "
					"control registers.\n");
				error = -ENOMEM;
				goto out_iounmap_conf;
			}
			phba->pci_bar2_memmap_p =
					phba->sli4_hba.ctrl_regs_memmap_p;
			lpfc_sli4_bar1_register_memmap(phba, if_type);
		} else {
			error = -ENOMEM;
			goto out_iounmap_conf;
		}
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
		/*
		 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_err(&pdev->dev,
				"ioremap failed for SLI4 HBA doorbell registers.\n");
			error = -ENOMEM;
			goto out_iounmap_conf;
		}
		phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
		lpfc_sli4_bar1_register_memmap(phba, if_type);
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
		if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
			/*
			 * Map SLI4 if type 0 HBA Doorbell Register base to
			 * a kernel virtual address and setup the registers.
			 */
			phba->pci_bar2_map = pci_resource_start(pdev,
								PCI_64BIT_BAR4);
			bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
			phba->sli4_hba.drbl_regs_memmap_p =
				ioremap(phba->pci_bar2_map,
					bar2map_len);
			if (!phba->sli4_hba.drbl_regs_memmap_p) {
				dev_err(&pdev->dev,
					"ioremap failed for SLI4 HBA"
					" doorbell registers.\n");
				error = -ENOMEM;
				goto out_iounmap_ctrl;
			}
			phba->pci_bar4_memmap_p =
					phba->sli4_hba.drbl_regs_memmap_p;
			error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
			if (error)
				goto out_iounmap_all;
		} else {
			error = -ENOMEM;
			goto out_iounmap_ctrl;
		}
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
	    pci_resource_start(pdev, PCI_64BIT_BAR4)) {
		/*
		 * Map SLI4 if type 6 HBA DPP Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.dpp_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.dpp_regs_memmap_p) {
			dev_err(&pdev->dev,
				"ioremap failed for SLI4 HBA dpp registers.\n");
			error = -ENOMEM;
			goto out_iounmap_all;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
	}

	/* Set up the EQ/CQ register handling functions now */
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
		break;
	default:
		break;
	}

	return 0;

out_iounmap_all:
	if (phba->sli4_hba.drbl_regs_memmap_p)
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	if (phba->sli4_hba.ctrl_regs_memmap_p)
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);

	return error;
}
/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		if (phba->sli4_hba.dpp_regs_memmap_p)
			iounmap(phba->sli4_hba.dpp_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
		break;
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	rc = pci_alloc_irq_vectors(phba->pcidev,
			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}

	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0012 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}
/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured to the driver, the driver
 * will try to fall back from the configured interrupt mode to an interrupt
 * mode which is supported by the platform, kernel, and device in the order
 * of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
	retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
	if (retval)
		return intr_mode;
	clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);

	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}
/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	int nr_irqs, i;

	if (phba->intr_type == MSIX)
		nr_irqs = LPFC_MSIX_VECTORS;
	else
		nr_irqs = 1;

	for (i = 0; i < nr_irqs; i++)
		free_irq(pci_irq_vector(phba->pcidev, i), phba);
	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
/**
 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ vector index or Hardware Queue index
 * @match: LPFC_FIND_BY_EQ = match by EQ
 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
 * Return the CPU that matches the selection criteria
 **/
static uint16_t
lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
{
	struct lpfc_vector_map_info *cpup;
	int cpu;

	/* Loop through all CPUs */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* If we are matching by EQ, there may be multiple CPUs
		 * using the same vector, so select the one with
		 * LPFC_CPU_FIRST_IRQ set.
		 */
		if ((match == LPFC_FIND_BY_EQ) &&
		    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
		    (cpup->eq == id))
			return cpu;

		/* If matching by HDWQ, select the first CPU that matches */
		if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
			return cpu;
	}
	return 0;
}
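/*
 * Illustrative sketch only (not lifted from this file): looking up the
 * first CPU whose map entry matches a hardware queue, as the online/offline
 * hotplug path below does when deciding whether to stop EQ polling.
 *
 *	uint16_t cpu;
 *
 *	cpu = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
 *	... "cpu" now indexes phba->sli4_hba.cpu_map for this hdwq ...
 */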
/**
 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU map index
 * @phys_id: CPU package physical id
 * @core_id: CPU core id
 **/
static int
lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
		uint16_t phys_id, uint16_t core_id)
{
	struct lpfc_vector_map_info *cpup;
	int idx;

	for_each_present_cpu(idx) {
		cpup = &phba->sli4_hba.cpu_map[idx];
		/* Does the cpup match the one we are looking for */
		if ((cpup->phys_id == phys_id) &&
		    (cpup->core_id == core_id) &&
		    (idx != cpu))
			return 1;
	}
	return 0;
}
/**
 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: index for eq and irq vector
 * @flag: flags to set for vector_map structure
 * @cpu: cpu used to index vector_map structure
 *
 * The routine assigns eq info into vector_map structure
 **/
static inline void
lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
			unsigned int cpu)
{
	struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);

	cpup->eq = eqidx;
	cpup->flag |= flag;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
			cpu, eqhdl->irq, cpup->eq, cpup->flag);
}
/**
 * lpfc_cpu_map_array_init - Initialize cpu_map structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the cpu_map array structure
 **/
static void
lpfc_cpu_map_array_init(struct lpfc_hba *phba)
{
	struct lpfc_vector_map_info *cpup;
	struct lpfc_eq_intr_info *eqi;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
		cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
		cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
		cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
		cpup->eq = LPFC_VECTOR_MAP_EMPTY;
		cpup->flag = 0;
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
		INIT_LIST_HEAD(&eqi->list);
		eqi->icnt = 0;
	}
}
/**
 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the hba_eq_hdl array structure
 **/
static void
lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
{
	struct lpfc_hba_eq_hdl *eqhdl;
	int i;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		eqhdl = lpfc_get_eq_hdl(i);
		eqhdl->irq = LPFC_IRQ_EMPTY;
		eqhdl->phba = phba;
	}
}
12444 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12445 * @phba: pointer to lpfc hba data structure.
12446 * @vectors: number of msix vectors allocated.
12448 * The routine will figure out the CPU affinity assignment for every
12449 * MSI-X vector allocated for the HBA.
12450 * In addition, the CPU to IO channel mapping will be calculated
12451 * and the phba->sli4_hba.cpu_map array will reflect this.
12454 lpfc_cpu_affinity_check(struct lpfc_hba
*phba
, int vectors
)
12456 int i
, cpu
, idx
, next_idx
, new_cpu
, start_cpu
, first_cpu
;
12457 int max_phys_id
, min_phys_id
;
12458 int max_core_id
, min_core_id
;
12459 struct lpfc_vector_map_info
*cpup
;
12460 struct lpfc_vector_map_info
*new_cpup
;
12461 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12462 struct lpfc_hdwq_stat
*c_stat
;
12466 min_phys_id
= LPFC_VECTOR_MAP_EMPTY
;
12468 min_core_id
= LPFC_VECTOR_MAP_EMPTY
;
12470 /* Update CPU map with physical id and core id of each CPU */
12471 for_each_present_cpu(cpu
) {
12472 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
12474 cpup
->phys_id
= topology_physical_package_id(cpu
);
12475 cpup
->core_id
= topology_core_id(cpu
);
12476 if (lpfc_find_hyper(phba
, cpu
, cpup
->phys_id
, cpup
->core_id
))
12477 cpup
->flag
|= LPFC_CPU_MAP_HYPER
;
12479 /* No distinction between CPUs for other platforms */
12481 cpup
->core_id
= cpu
;
12484 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
12485 "3328 CPU %d physid %d coreid %d flag x%x\n",
12486 cpu
, cpup
->phys_id
, cpup
->core_id
, cpup
->flag
);
12488 if (cpup
->phys_id
> max_phys_id
)
12489 max_phys_id
= cpup
->phys_id
;
12490 if (cpup
->phys_id
< min_phys_id
)
12491 min_phys_id
= cpup
->phys_id
;
12493 if (cpup
->core_id
> max_core_id
)
12494 max_core_id
= cpup
->core_id
;
12495 if (cpup
->core_id
< min_core_id
)
12496 min_core_id
= cpup
->core_id
;
12499 /* After looking at each irq vector assigned to this pcidev, its
12500 * possible to see that not ALL CPUs have been accounted for.
12501 * Next we will set any unassigned (unaffinitized) cpu map
12502 * entries to a IRQ on the same phys_id.
12504 first_cpu
= cpumask_first(cpu_present_mask
);
12505 start_cpu
= first_cpu
;
12507 for_each_present_cpu(cpu
) {
12508 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
12510 /* Is this CPU entry unassigned */
12511 if (cpup
->eq
== LPFC_VECTOR_MAP_EMPTY
) {
12512 /* Mark CPU as IRQ not assigned by the kernel */
12513 cpup
->flag
|= LPFC_CPU_MAP_UNASSIGN
;
12515 /* If so, find a new_cpup that is on the SAME
12516 * phys_id as cpup. start_cpu will start where we
12517 * left off so all unassigned entries don't get assgined
12518 * the IRQ of the first entry.
12520 new_cpu
= start_cpu
;
12521 for (i
= 0; i
< phba
->sli4_hba
.num_present_cpu
; i
++) {
12522 new_cpup
= &phba
->sli4_hba
.cpu_map
[new_cpu
];
12523 if (!(new_cpup
->flag
& LPFC_CPU_MAP_UNASSIGN
) &&
12524 (new_cpup
->eq
!= LPFC_VECTOR_MAP_EMPTY
) &&
12525 (new_cpup
->phys_id
== cpup
->phys_id
))
12527 new_cpu
= lpfc_next_present_cpu(new_cpu
);
12529 /* At this point, we leave the CPU as unassigned */
12532 /* We found a matching phys_id, so copy the IRQ info */
12533 cpup
->eq
= new_cpup
->eq
;
12535 /* Bump start_cpu to the next slot to minmize the
12536 * chance of having multiple unassigned CPU entries
12537 * selecting the same IRQ.
12539 start_cpu
= lpfc_next_present_cpu(new_cpu
);
12541 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
12542 "3337 Set Affinity: CPU %d "
12543 "eq %d from peer cpu %d same "
12545 cpu
, cpup
->eq
, new_cpu
,
12550 /* Set any unassigned cpu map entries to a IRQ on any phys_id */
12551 start_cpu
= first_cpu
;
12553 for_each_present_cpu(cpu
) {
12554 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
12556 /* Is this entry unassigned */
12557 if (cpup
->eq
== LPFC_VECTOR_MAP_EMPTY
) {
12558 /* Mark it as IRQ not assigned by the kernel */
12559 cpup
->flag
|= LPFC_CPU_MAP_UNASSIGN
;
12561 /* If so, find a new_cpup thats on ANY phys_id
12562 * as the cpup. start_cpu will start where we
12563 * left off so all unassigned entries don't get
12564 * assigned the IRQ of the first entry.
12566 new_cpu
= start_cpu
;
12567 for (i
= 0; i
< phba
->sli4_hba
.num_present_cpu
; i
++) {
12568 new_cpup
= &phba
->sli4_hba
.cpu_map
[new_cpu
];
12569 if (!(new_cpup
->flag
& LPFC_CPU_MAP_UNASSIGN
) &&
12570 (new_cpup
->eq
!= LPFC_VECTOR_MAP_EMPTY
))
12572 new_cpu
= lpfc_next_present_cpu(new_cpu
);
12574 /* We should never leave an entry unassigned */
12575 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
12576 "3339 Set Affinity: CPU %d "
12577 "eq %d UNASSIGNED\n",
12578 cpup
->hdwq
, cpup
->eq
);
12581 /* We found an available entry, copy the IRQ info */
12582 cpup
->eq
= new_cpup
->eq
;
12584 /* Bump start_cpu to the next slot to minmize the
12585 * chance of having multiple unassigned CPU entries
12586 * selecting the same IRQ.
12588 start_cpu
= lpfc_next_present_cpu(new_cpu
);
12590 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
12591 "3338 Set Affinity: CPU %d "
12592 "eq %d from peer cpu %d (%d/%d)\n",
12593 cpu
, cpup
->eq
, new_cpu
,
12594 new_cpup
->phys_id
, new_cpup
->core_id
);
12598 /* Assign hdwq indices that are unique across all cpus in the map
12599 * that are also FIRST_CPUs.
12602 for_each_present_cpu(cpu
) {
12603 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
12605 /* Only FIRST IRQs get a hdwq index assignment. */
12606 if (!(cpup
->flag
& LPFC_CPU_FIRST_IRQ
))
12609 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12612 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
12613 "3333 Set Affinity: CPU %d (phys %d core %d): "
12614 "hdwq %d eq %d flg x%x\n",
12615 cpu
, cpup
->phys_id
, cpup
->core_id
,
12616 cpup
->hdwq
, cpup
->eq
, cpup
->flag
);
12618 /* Associate a hdwq with each cpu_map entry
12619 * This will be 1 to 1 - hdwq to cpu, unless there are less
12620 * hardware queues then CPUs. For that case we will just round-robin
12621 * the available hardware queues as they get assigned to CPUs.
12622 * The next_idx is the idx from the FIRST_CPU loop above to account
12623 * for irq_chann < hdwq. The idx is used for round-robin assignments
12624 * and needs to start at 0.
12629 for_each_present_cpu(cpu
) {
12630 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
12632 /* FIRST cpus are already mapped. */
12633 if (cpup
->flag
& LPFC_CPU_FIRST_IRQ
)
12636 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12637 * of the unassigned cpus to the next idx so that all
12638 * hdw queues are fully utilized.
12640 if (next_idx
< phba
->cfg_hdw_queue
) {
12641 cpup
->hdwq
= next_idx
;
12646 /* Not a First CPU and all hdw_queues are used. Reuse a
12647 * Hardware Queue for another CPU, so be smart about it
12648 * and pick one that has its IRQ/EQ mapped to the same phys_id
12649 * (CPU package) and core_id.
12651 new_cpu
= start_cpu
;
12652 for (i
= 0; i
< phba
->sli4_hba
.num_present_cpu
; i
++) {
12653 new_cpup
= &phba
->sli4_hba
.cpu_map
[new_cpu
];
12654 if (new_cpup
->hdwq
!= LPFC_VECTOR_MAP_EMPTY
&&
12655 new_cpup
->phys_id
== cpup
->phys_id
&&
12656 new_cpup
->core_id
== cpup
->core_id
) {
12659 new_cpu
= lpfc_next_present_cpu(new_cpu
);
12662 /* If we can't match both phys_id and core_id,
12663 * settle for just a phys_id match.
12665 new_cpu
= start_cpu
;
12666 for (i
= 0; i
< phba
->sli4_hba
.num_present_cpu
; i
++) {
12667 new_cpup
= &phba
->sli4_hba
.cpu_map
[new_cpu
];
12668 if (new_cpup
->hdwq
!= LPFC_VECTOR_MAP_EMPTY
&&
12669 new_cpup
->phys_id
== cpup
->phys_id
)
12671 new_cpu
= lpfc_next_present_cpu(new_cpu
);
12674 /* Otherwise just round robin on cfg_hdw_queue */
12675 cpup
->hdwq
= idx
% phba
->cfg_hdw_queue
;
12679 /* We found an available entry, copy the IRQ info */
12680 start_cpu
= lpfc_next_present_cpu(new_cpu
);
12681 cpup
->hdwq
= new_cpup
->hdwq
;
12683 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
12684 "3335 Set Affinity: CPU %d (phys %d core %d): "
12685 "hdwq %d eq %d flg x%x\n",
12686 cpu
, cpup
->phys_id
, cpup
->core_id
,
12687 cpup
->hdwq
, cpup
->eq
, cpup
->flag
);
12691 * Initialize the cpu_map slots for not-present cpus in case
12692 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12695 for_each_possible_cpu(cpu
) {
12696 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
12697 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12698 c_stat
= per_cpu_ptr(phba
->sli4_hba
.c_stat
, cpu
);
12699 c_stat
->hdwq_no
= cpup
->hdwq
;
12701 if (cpup
->hdwq
!= LPFC_VECTOR_MAP_EMPTY
)
12704 cpup
->hdwq
= idx
++ % phba
->cfg_hdw_queue
;
12705 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12706 c_stat
->hdwq_no
= cpup
->hdwq
;
12708 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
12709 "3340 Set Affinity: not present "
12710 "CPU %d hdwq %d\n",
12714 /* The cpu_map array will be used later during initialization
12715 * when EQ / CQ / WQs are allocated and configured.
/**
 * lpfc_cpuhp_get_eq
 *
 * @phba:   pointer to lpfc hba data structure.
 * @cpu:    cpu going offline
 * @eqlist: eq list to append to
 **/
static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
		  struct list_head *eqlist)
{
	const struct cpumask *maskp;
	struct lpfc_queue *eq;
	struct cpumask *tmp;
	u16 idx;

	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/*
		 * If the irq is not affinitized to the cpu going offline,
		 * then we don't need to poll the eq attached to it.
		 */
		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
			continue;
		/* Get the cpus that are online and are affini-
		 * tized to this irq vector.  If the count is
		 * more than 1 then cpuhp is not going to shut-
		 * down this vector.  Since this cpu has not
		 * gone offline yet, we need >1.
		 */
		cpumask_and(tmp, maskp, cpu_online_mask);
		if (cpumask_weight(tmp) > 1)
			continue;

		/* Now that we have an irq to shutdown, get the eq
		 * mapped to this irq.  Note: multiple hdwq's in
		 * the software can share an eq, but eventually
		 * only eq will be mapped to this vector
		 */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		list_add(&eq->_poll_list, eqlist);
	}
	kfree(tmp);
	return 0;
}
12773 static void __lpfc_cpuhp_remove(struct lpfc_hba
*phba
)
12775 if (phba
->sli_rev
!= LPFC_SLI_REV4
)
12778 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state
,
12781 * unregistering the instance doesn't stop the polling
12782 * timer. Wait for the poll timer to retire.
12785 del_timer_sync(&phba
->cpuhp_poll_timer
);
12788 static void lpfc_cpuhp_remove(struct lpfc_hba
*phba
)
12791 test_bit(FC_OFFLINE_MODE
, &phba
->pport
->fc_flag
))
12794 __lpfc_cpuhp_remove(phba
);
12797 static void lpfc_cpuhp_add(struct lpfc_hba
*phba
)
12799 if (phba
->sli_rev
!= LPFC_SLI_REV4
)
12804 if (!list_empty(&phba
->poll_list
))
12805 mod_timer(&phba
->cpuhp_poll_timer
,
12806 jiffies
+ msecs_to_jiffies(LPFC_POLL_HB
));
12810 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state
,
12814 static int __lpfc_cpuhp_checks(struct lpfc_hba
*phba
, int *retval
)
12816 if (test_bit(FC_UNLOADING
, &phba
->pport
->load_flag
)) {
12821 if (phba
->sli_rev
!= LPFC_SLI_REV4
) {
12826 /* proceed with the hotplug */
/**
 * lpfc_irq_set_aff - set IRQ affinity
 * @eqhdl: EQ handle
 * @cpu: cpu to set affinity
 **/
static inline void
lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
{
	cpumask_clear(&eqhdl->aff_mask);
	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
	irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
}

/**
 * lpfc_irq_clear_aff - clear IRQ affinity
 * @eqhdl: EQ handle
 **/
static inline void
lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
	cpumask_clear(&eqhdl->aff_mask);
	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
}
12858 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12859 * @phba: pointer to HBA context object.
12860 * @cpu: cpu going offline/online
12861 * @offline: true, cpu is going offline. false, cpu is coming online.
12863 * If cpu is going offline, we'll try our best effort to find the next
12864 * online cpu on the phba's original_mask and migrate all offlining IRQ
12867 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12869 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12870 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12874 lpfc_irq_rebalance(struct lpfc_hba
*phba
, unsigned int cpu
, bool offline
)
12876 struct lpfc_vector_map_info
*cpup
;
12877 struct cpumask
*aff_mask
;
12878 unsigned int cpu_select
, cpu_next
, idx
;
12879 const struct cpumask
*orig_mask
;
12881 if (phba
->irq_chann_mode
== NORMAL_MODE
)
12884 orig_mask
= &phba
->sli4_hba
.irq_aff_mask
;
12886 if (!cpumask_test_cpu(cpu
, orig_mask
))
12889 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
12891 if (!(cpup
->flag
& LPFC_CPU_FIRST_IRQ
))
12895 /* Find next online CPU on original mask */
12896 cpu_next
= cpumask_next_wrap(cpu
, orig_mask
, cpu
, true);
12897 cpu_select
= lpfc_next_online_cpu(orig_mask
, cpu_next
);
12899 /* Found a valid CPU */
12900 if ((cpu_select
< nr_cpu_ids
) && (cpu_select
!= cpu
)) {
12901 /* Go through each eqhdl and ensure offlining
12902 * cpu aff_mask is migrated
12904 for (idx
= 0; idx
< phba
->cfg_irq_chann
; idx
++) {
12905 aff_mask
= lpfc_get_aff_mask(idx
);
12907 /* Migrate affinity */
12908 if (cpumask_test_cpu(cpu
, aff_mask
))
12909 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx
),
12913 /* Rely on irqbalance if no online CPUs left on NUMA */
12914 for (idx
= 0; idx
< phba
->cfg_irq_chann
; idx
++)
12915 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx
));
12918 /* Migrate affinity back to this CPU */
12919 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup
->eq
), cpu
);
static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	LIST_HEAD(eqlist);
	int retval;

	if (!phba) {
		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
		return 0;
	}

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, true);

	retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
	if (retval)
		return retval;

	/* start polling on these eq's */
	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
		list_del_init(&eq->_poll_list);
		lpfc_sli4_start_polling(eq);
	}

	return 0;
}

static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	unsigned int n;
	int retval;

	if (!phba) {
		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
		return 0;
	}

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, false);

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
		n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
		if (n == cpu)
			lpfc_sli4_stop_polling(eq);
	}

	return 0;
}
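/*
 * Illustrative sketch only (not lifted from this file): the offline/online
 * callbacks above are registered once as a dynamic multi-instance cpuhp
 * state at module init time, and each HBA then attaches its phba->cpuhp
 * node to that state (see lpfc_cpuhp_add()/__lpfc_cpuhp_remove()).  The
 * state name string here is hypothetical.
 *
 *	rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *				     "lpfc/sli4:online",
 *				     lpfc_cpu_online, lpfc_cpu_offline);
 *	if (rc < 0)
 *		... run without hotplug callbacks ...
 *	else
 *		lpfc_cpuhp_state = rc;
 */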
12980 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12981 * @phba: pointer to lpfc hba data structure.
12983 * This routine is invoked to enable the MSI-X interrupt vectors to device
12984 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
12985 * to cpus on the system.
12987 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12988 * the number of cpus on the same numa node as this adapter. The vectors are
12989 * allocated without requesting OS affinity mapping. A vector will be
12990 * allocated and assigned to each online and offline cpu. If the cpu is
12991 * online, then affinity will be set to that cpu. If the cpu is offline, then
12992 * affinity will be set to the nearest peer cpu within the numa node that is
12993 * online. If there are no online cpus within the numa node, affinity is not
12994 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12995 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
12998 * If numa mode is not enabled and there is more than 1 vector allocated, then
12999 * the driver relies on the managed irq interface where the OS assigns vector to
13000 * cpu affinity. The driver will then use that affinity mapping to setup its
13001 * cpu mapping table.
13005 * other values - error
13008 lpfc_sli4_enable_msix(struct lpfc_hba
*phba
)
13010 int vectors
, rc
, index
;
13012 const struct cpumask
*aff_mask
= NULL
;
13013 unsigned int cpu
= 0, cpu_cnt
= 0, cpu_select
= nr_cpu_ids
;
13014 struct lpfc_vector_map_info
*cpup
;
13015 struct lpfc_hba_eq_hdl
*eqhdl
;
13016 const struct cpumask
*maskp
;
13017 unsigned int flags
= PCI_IRQ_MSIX
;
13019 /* Set up MSI-X multi-message vectors */
13020 vectors
= phba
->cfg_irq_chann
;
13022 if (phba
->irq_chann_mode
!= NORMAL_MODE
)
13023 aff_mask
= &phba
->sli4_hba
.irq_aff_mask
;
13026 cpu_cnt
= cpumask_weight(aff_mask
);
13027 vectors
= min(phba
->cfg_irq_chann
, cpu_cnt
);
13029 /* cpu: iterates over aff_mask including offline or online
13030 * cpu_select: iterates over online aff_mask to set affinity
13032 cpu
= cpumask_first(aff_mask
);
13033 cpu_select
= lpfc_next_online_cpu(aff_mask
, cpu
);
13035 flags
|= PCI_IRQ_AFFINITY
;
13038 rc
= pci_alloc_irq_vectors(phba
->pcidev
, 1, vectors
, flags
);
13040 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
13041 "0484 PCI enable MSI-X failed (%d)\n", rc
);
13046 /* Assign MSI-X vectors to interrupt handlers */
13047 for (index
= 0; index
< vectors
; index
++) {
13048 eqhdl
= lpfc_get_eq_hdl(index
);
13049 name
= eqhdl
->handler_name
;
13050 memset(name
, 0, LPFC_SLI4_HANDLER_NAME_SZ
);
13051 snprintf(name
, LPFC_SLI4_HANDLER_NAME_SZ
,
13052 LPFC_DRIVER_HANDLER_NAME
"%d", index
);
13054 eqhdl
->idx
= index
;
13055 rc
= pci_irq_vector(phba
->pcidev
, index
);
13057 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
13058 "0489 MSI-X fast-path (%d) "
13059 "pci_irq_vec failed (%d)\n", index
, rc
);
13064 rc
= request_threaded_irq(eqhdl
->irq
,
13065 &lpfc_sli4_hba_intr_handler
,
13066 &lpfc_sli4_hba_intr_handler_th
,
13069 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
13070 "0486 MSI-X fast-path (%d) "
13071 "request_irq failed (%d)\n", index
, rc
);
13076 /* If found a neighboring online cpu, set affinity */
13077 if (cpu_select
< nr_cpu_ids
)
13078 lpfc_irq_set_aff(eqhdl
, cpu_select
);
13080 /* Assign EQ to cpu_map */
13081 lpfc_assign_eq_map_info(phba
, index
,
13082 LPFC_CPU_FIRST_IRQ
,
13085 /* Iterate to next offline or online cpu in aff_mask */
13086 cpu
= cpumask_next(cpu
, aff_mask
);
13088 /* Find next online cpu in aff_mask to set affinity */
13089 cpu_select
= lpfc_next_online_cpu(aff_mask
, cpu
);
13090 } else if (vectors
== 1) {
13091 cpu
= cpumask_first(cpu_present_mask
);
13092 lpfc_assign_eq_map_info(phba
, index
, LPFC_CPU_FIRST_IRQ
,
13095 maskp
= pci_irq_get_affinity(phba
->pcidev
, index
);
13097 /* Loop through all CPUs associated with vector index */
13098 for_each_cpu_and(cpu
, maskp
, cpu_present_mask
) {
13099 cpup
= &phba
->sli4_hba
.cpu_map
[cpu
];
13101 /* If this is the first CPU thats assigned to
13102 * this vector, set LPFC_CPU_FIRST_IRQ.
13104 * With certain platforms its possible that irq
13105 * vectors are affinitized to all the cpu's.
13106 * This can result in each cpu_map.eq to be set
13107 * to the last vector, resulting in overwrite
13108 * of all the previous cpu_map.eq. Ensure that
13109 * each vector receives a place in cpu_map.
13110 * Later call to lpfc_cpu_affinity_check will
13111 * ensure we are nicely balanced out.
13113 if (cpup
->eq
!= LPFC_VECTOR_MAP_EMPTY
)
13115 lpfc_assign_eq_map_info(phba
, index
,
13116 LPFC_CPU_FIRST_IRQ
,
13123 if (vectors
!= phba
->cfg_irq_chann
) {
13124 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
13125 "3238 Reducing IO channels to match number of "
13126 "MSI-X vectors, requested %d got %d\n",
13127 phba
->cfg_irq_chann
, vectors
);
13128 if (phba
->cfg_irq_chann
> vectors
)
13129 phba
->cfg_irq_chann
= vectors
;
13135 /* free the irq already requested */
13136 for (--index
; index
>= 0; index
--) {
13137 eqhdl
= lpfc_get_eq_hdl(index
);
13138 lpfc_irq_clear_aff(eqhdl
);
13139 free_irq(eqhdl
->irq
, eqhdl
);
13142 /* Unconfigure MSI-X capability structure */
13143 pci_free_irq_vectors(phba
->pcidev
);
13150 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13151 * @phba: pointer to lpfc hba data structure.
13153 * This routine is invoked to enable the MSI interrupt mode to device with
13154 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13155 * called to enable the MSI vector. The device driver is responsible for
13156 * calling the request_irq() to register MSI vector with a interrupt the
13157 * handler, which is done in this function.
13161 * other values - error
13164 lpfc_sli4_enable_msi(struct lpfc_hba
*phba
)
13168 struct lpfc_hba_eq_hdl
*eqhdl
;
13170 rc
= pci_alloc_irq_vectors(phba
->pcidev
, 1, 1,
13171 PCI_IRQ_MSI
| PCI_IRQ_AFFINITY
);
13173 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
13174 "0487 PCI enable MSI mode success.\n");
13176 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
13177 "0488 PCI enable MSI mode failed (%d)\n", rc
);
13178 return rc
? rc
: -1;
13181 rc
= request_irq(phba
->pcidev
->irq
, lpfc_sli4_intr_handler
,
13182 0, LPFC_DRIVER_NAME
, phba
);
13184 pci_free_irq_vectors(phba
->pcidev
);
13185 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
13186 "0490 MSI request_irq failed (%d)\n", rc
);
13190 eqhdl
= lpfc_get_eq_hdl(0);
13191 rc
= pci_irq_vector(phba
->pcidev
, 0);
13193 pci_free_irq_vectors(phba
->pcidev
);
13194 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
13195 "0496 MSI pci_irq_vec failed (%d)\n", rc
);
13200 cpu
= cpumask_first(cpu_present_mask
);
13201 lpfc_assign_eq_map_info(phba
, 0, LPFC_CPU_FIRST_IRQ
, cpu
);
13203 for (index
= 0; index
< phba
->cfg_irq_chann
; index
++) {
13204 eqhdl
= lpfc_get_eq_hdl(index
);
13205 eqhdl
->idx
= index
;
13212 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13213 * @phba: pointer to lpfc hba data structure.
13214 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13216 * This routine is invoked to enable device interrupt and associate driver's
13217 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
13218 * interface spec. Depends on the interrupt mode configured to the driver,
13219 * the driver will try to fallback from the configured interrupt mode to an
13220 * interrupt mode which is supported by the platform, kernel, and device in
13222 * MSI-X -> MSI -> IRQ.
13225 * Interrupt mode (2, 1, 0) - successful
13226 * LPFC_INTR_ERROR - error
13229 lpfc_sli4_enable_intr(struct lpfc_hba
*phba
, uint32_t cfg_mode
)
13231 uint32_t intr_mode
= LPFC_INTR_ERROR
;
13234 if (cfg_mode
== 2) {
13235 /* Preparation before conf_msi mbox cmd */
13238 /* Now, try to enable MSI-X interrupt mode */
13239 retval
= lpfc_sli4_enable_msix(phba
);
13241 /* Indicate initialization to MSI-X mode */
13242 phba
->intr_type
= MSIX
;
13248 /* Fallback to MSI if MSI-X initialization failed */
13249 if (cfg_mode
>= 1 && phba
->intr_type
== NONE
) {
13250 retval
= lpfc_sli4_enable_msi(phba
);
13252 /* Indicate initialization to MSI mode */
13253 phba
->intr_type
= MSI
;
13258 /* Fallback to INTx if both MSI-X/MSI initalization failed */
13259 if (phba
->intr_type
== NONE
) {
13260 retval
= request_irq(phba
->pcidev
->irq
, lpfc_sli4_intr_handler
,
13261 IRQF_SHARED
, LPFC_DRIVER_NAME
, phba
);
13263 struct lpfc_hba_eq_hdl
*eqhdl
;
13266 /* Indicate initialization to INTx mode */
13267 phba
->intr_type
= INTx
;
13270 eqhdl
= lpfc_get_eq_hdl(0);
13271 retval
= pci_irq_vector(phba
->pcidev
, 0);
13273 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
13274 "0502 INTR pci_irq_vec failed (%d)\n",
13276 return LPFC_INTR_ERROR
;
13278 eqhdl
->irq
= retval
;
13280 cpu
= cpumask_first(cpu_present_mask
);
13281 lpfc_assign_eq_map_info(phba
, 0, LPFC_CPU_FIRST_IRQ
,
13283 for (idx
= 0; idx
< phba
->cfg_irq_chann
; idx
++) {
13284 eqhdl
= lpfc_get_eq_hdl(idx
);
/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX) {
		int index;
		struct lpfc_hba_eq_hdl *eqhdl;

		/* Free up MSI-X multi-message vectors */
		for (index = 0; index < phba->cfg_irq_chann; index++) {
			eqhdl = lpfc_get_eq_hdl(index);
			lpfc_irq_clear_aff(eqhdl);
			free_irq(eqhdl->irq, eqhdl);
		}
	} else {
		free_irq(phba->pcidev->irq, phba);
	}

	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	set_bit(FC_UNLOADING, &phba->pport->load_flag);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);
}
13355 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13356 * @phba: Pointer to HBA context object.
13358 * This function is called in the SLI4 code path to wait for completion
13359 * of device's XRIs exchange busy. It will check the XRI exchange busy
13360 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
13361 * that, it will check the XRI exchange busy on outstanding FCP and ELS
13362 * I/Os every 30 seconds, log error message, and wait forever. Only when
13363 * all XRI exchange busy complete, the driver unload shall proceed with
13364 * invoking the function reset ioctl mailbox command to the CNA and the
13365 * the rest of the driver unload resource release.
13368 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba
*phba
)
13370 struct lpfc_sli4_hdw_queue
*qp
;
13373 int io_xri_cmpl
= 1;
13374 int nvmet_xri_cmpl
= 1;
13375 int els_xri_cmpl
= list_empty(&phba
->sli4_hba
.lpfc_abts_els_sgl_list
);
13377 /* Driver just aborted IOs during the hba_unset process. Pause
13378 * here to give the HBA time to complete the IO and get entries
13379 * into the abts lists.
13381 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1
* 5);
13383 /* Wait for NVME pending IO to flush back to transport. */
13384 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
)
13385 lpfc_nvme_wait_for_io_drain(phba
);
13388 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
13389 qp
= &phba
->sli4_hba
.hdwq
[idx
];
13390 io_xri_cmpl
= list_empty(&qp
->lpfc_abts_io_buf_list
);
13391 if (!io_xri_cmpl
) /* if list is NOT empty */
13397 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
13399 list_empty(&phba
->sli4_hba
.lpfc_abts_nvmet_ctx_list
);
13402 while (!els_xri_cmpl
|| !io_xri_cmpl
|| !nvmet_xri_cmpl
) {
13403 if (wait_time
> LPFC_XRI_EXCH_BUSY_WAIT_TMO
) {
13404 if (!nvmet_xri_cmpl
)
13405 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
13406 "6424 NVMET XRI exchange busy "
13407 "wait time: %d seconds.\n",
13410 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
13411 "6100 IO XRI exchange busy "
13412 "wait time: %d seconds.\n",
13415 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
13416 "2878 ELS XRI exchange busy "
13417 "wait time: %d seconds.\n",
13419 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2
);
13420 wait_time
+= LPFC_XRI_EXCH_BUSY_WAIT_T2
;
13422 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1
);
13423 wait_time
+= LPFC_XRI_EXCH_BUSY_WAIT_T1
;
13427 for (idx
= 0; idx
< phba
->cfg_hdw_queue
; idx
++) {
13428 qp
= &phba
->sli4_hba
.hdwq
[idx
];
13429 io_xri_cmpl
= list_empty(
13430 &qp
->lpfc_abts_io_buf_list
);
13431 if (!io_xri_cmpl
) /* if list is NOT empty */
13437 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
13438 nvmet_xri_cmpl
= list_empty(
13439 &phba
->sli4_hba
.lpfc_abts_nvmet_ctx_list
);
13442 list_empty(&phba
->sli4_hba
.lpfc_abts_els_sgl_list
);
13448 * lpfc_sli4_hba_unset - Unset the fcoe hba
13449 * @phba: Pointer to HBA context object.
13451 * This function is called in the SLI4 code path to reset the HBA's FCoE
13452 * function. The caller is not required to hold any lock. This routine
13453 * issues PCI function reset mailbox command to reset the FCoE function.
13454 * At the end of the function, it calls lpfc_hba_down_post function to
13455 * free any pending commands.
13458 lpfc_sli4_hba_unset(struct lpfc_hba
*phba
)
13461 LPFC_MBOXQ_t
*mboxq
;
13462 struct pci_dev
*pdev
= phba
->pcidev
;
13464 lpfc_stop_hba_timers(phba
);
13465 hrtimer_cancel(&phba
->cmf_stats_timer
);
13466 hrtimer_cancel(&phba
->cmf_timer
);
13469 phba
->sli4_hba
.intr_enable
= 0;
13472 * Gracefully wait out the potential current outstanding asynchronous
13476 /* First, block any pending async mailbox command from posted */
13477 spin_lock_irq(&phba
->hbalock
);
13478 phba
->sli
.sli_flag
|= LPFC_SLI_ASYNC_MBX_BLK
;
13479 spin_unlock_irq(&phba
->hbalock
);
13480 /* Now, trying to wait it out if we can */
13481 while (phba
->sli
.sli_flag
& LPFC_SLI_MBOX_ACTIVE
) {
13483 if (++wait_cnt
> LPFC_ACTIVE_MBOX_WAIT_CNT
)
13486 /* Forcefully release the outstanding mailbox command if timed out */
13487 if (phba
->sli
.sli_flag
& LPFC_SLI_MBOX_ACTIVE
) {
13488 spin_lock_irq(&phba
->hbalock
);
13489 mboxq
= phba
->sli
.mbox_active
;
13490 mboxq
->u
.mb
.mbxStatus
= MBX_NOT_FINISHED
;
13491 __lpfc_mbox_cmpl_put(phba
, mboxq
);
13492 phba
->sli
.sli_flag
&= ~LPFC_SLI_MBOX_ACTIVE
;
13493 phba
->sli
.mbox_active
= NULL
;
13494 spin_unlock_irq(&phba
->hbalock
);
13497 /* Abort all iocbs associated with the hba */
13498 lpfc_sli_hba_iocb_abort(phba
);
13500 if (!pci_channel_offline(phba
->pcidev
))
13501 /* Wait for completion of device XRI exchange busy */
13502 lpfc_sli4_xri_exchange_busy_wait(phba
);
13504 /* per-phba callback de-registration for hotplug event */
13506 lpfc_cpuhp_remove(phba
);
13508 /* Disable PCI subsystem interrupt */
13509 lpfc_sli4_disable_intr(phba
);
13511 /* Disable SR-IOV if enabled */
13512 if (phba
->cfg_sriov_nr_virtfn
)
13513 pci_disable_sriov(pdev
);
13515 /* Stop kthread signal shall trigger work_done one more time */
13516 kthread_stop(phba
->worker_thread
);
13518 /* Disable FW logging to host memory */
13519 lpfc_ras_stop_fwlog(phba
);
13521 /* Reset SLI4 HBA FCoE function */
13522 lpfc_pci_function_reset(phba
);
13524 /* release all queue allocated resources. */
13525 lpfc_sli4_queue_destroy(phba
);
13527 /* Free RAS DMA memory */
13528 if (phba
->ras_fwlog
.ras_enabled
)
13529 lpfc_sli4_ras_dma_free(phba
);
13531 /* Stop the SLI4 device port */
13533 phba
->pport
->work_port_events
= 0;
static uint32_t
lpfc_cgn_crc32(uint32_t crc, u8 byte)
{
	uint32_t msb;
	int bit;

	for (bit = 0; bit < 8; bit++) {
		msb = (crc >> 31) & 1;
		crc <<= 1;

		if (msb ^ (byte & 1)) {
			crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
			crc |= 1;
		}
		byte >>= 1;
	}
	return crc;
}

static uint32_t
lpfc_cgn_reverse_bits(uint32_t wd)
{
	uint32_t result = 0;
	uint32_t i;

	for (i = 0; i < 32; i++) {
		result <<= 1;
		result |= (1 & (wd >> i));
	}
	return result;
}

/*
 * The routine corresponds with the algorithm the HBA firmware
 * uses to validate the data integrity.
 */
uint32_t
lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
{
	uint32_t i;
	uint32_t result;
	uint8_t *data = (uint8_t *)ptr;

	for (i = 0; i < byteLen; ++i)
		crc = lpfc_cgn_crc32(crc, data[i]);

	result = ~lpfc_cgn_reverse_bits(crc);
	return result;
}
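/*
 * Illustrative sketch only (not part of the driver source): recomputing and
 * checking the congestion-info CRC the same way lpfc_init_congestion_buf()
 * below seeds it, so a reader of the shared buffer can detect corruption.
 *
 *	struct lpfc_cgn_info *cp = phba->cgn_i->virt;
 *	uint32_t crc;
 *
 *	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *	if (cp->cgn_info_crc != cpu_to_le32(crc))
 *		... buffer contents do not match the stored CRC ...
 */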
13587 lpfc_init_congestion_buf(struct lpfc_hba
*phba
)
13589 struct lpfc_cgn_info
*cp
;
13593 lpfc_printf_log(phba
, KERN_INFO
, LOG_CGN_MGMT
,
13594 "6235 INIT Congestion Buffer %p\n", phba
->cgn_i
);
13598 cp
= (struct lpfc_cgn_info
*)phba
->cgn_i
->virt
;
13600 atomic_set(&phba
->cgn_fabric_warn_cnt
, 0);
13601 atomic_set(&phba
->cgn_fabric_alarm_cnt
, 0);
13602 atomic_set(&phba
->cgn_sync_alarm_cnt
, 0);
13603 atomic_set(&phba
->cgn_sync_warn_cnt
, 0);
13605 atomic_set(&phba
->cgn_driver_evt_cnt
, 0);
13606 atomic_set(&phba
->cgn_latency_evt_cnt
, 0);
13607 atomic64_set(&phba
->cgn_latency_evt
, 0);
13608 phba
->cgn_evt_minute
= 0;
13610 memset(cp
, 0xff, offsetof(struct lpfc_cgn_info
, cgn_stat
));
13611 cp
->cgn_info_size
= cpu_to_le16(LPFC_CGN_INFO_SZ
);
13612 cp
->cgn_info_version
= LPFC_CGN_INFO_V4
;
13614 /* cgn parameters */
13615 cp
->cgn_info_mode
= phba
->cgn_p
.cgn_param_mode
;
13616 cp
->cgn_info_level0
= phba
->cgn_p
.cgn_param_level0
;
13617 cp
->cgn_info_level1
= phba
->cgn_p
.cgn_param_level1
;
13618 cp
->cgn_info_level2
= phba
->cgn_p
.cgn_param_level2
;
13620 lpfc_cgn_update_tstamp(phba
, &cp
->base_time
);
13622 /* Fill in default LUN qdepth */
13624 size
= (uint16_t)(phba
->pport
->cfg_lun_queue_depth
);
13625 cp
->cgn_lunq
= cpu_to_le16(size
);
13628 /* last used Index initialized to 0xff already */
13630 cp
->cgn_warn_freq
= cpu_to_le16(LPFC_FPIN_INIT_FREQ
);
13631 cp
->cgn_alarm_freq
= cpu_to_le16(LPFC_FPIN_INIT_FREQ
);
13632 crc
= lpfc_cgn_calc_crc32(cp
, LPFC_CGN_INFO_SZ
, LPFC_CGN_CRC32_SEED
);
13633 cp
->cgn_info_crc
= cpu_to_le32(crc
);
13635 phba
->cgn_evt_timestamp
= jiffies
+
13636 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN
);
void
lpfc_init_congestion_stat(struct lpfc_hba *phba)
{
	struct lpfc_cgn_info *cp;
	uint32_t crc;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6236 INIT Congestion Stat %p\n", phba->cgn_i);

	if (!phba->cgn_i)
		return;

	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
	memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));

	lpfc_cgn_update_tstamp(phba, &cp->stat_start);
	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(crc);
}
13660 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13661 * @phba: Pointer to hba context object.
13662 * @reg: flag to determine register or unregister.
13665 __lpfc_reg_congestion_buf(struct lpfc_hba
*phba
, int reg
)
13667 struct lpfc_mbx_reg_congestion_buf
*reg_congestion_buf
;
13668 union lpfc_sli4_cfg_shdr
*shdr
;
13669 uint32_t shdr_status
, shdr_add_status
;
13670 LPFC_MBOXQ_t
*mboxq
;
13676 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
13678 lpfc_printf_log(phba
, KERN_ERR
, LOG_MBOX
,
13679 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13680 "HBA state x%x reg %d\n",
13681 phba
->pport
->port_state
, reg
);
13685 length
= (sizeof(struct lpfc_mbx_reg_congestion_buf
) -
13686 sizeof(struct lpfc_sli4_cfg_mhdr
));
13687 lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_COMMON
,
13688 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF
, length
,
13689 LPFC_SLI4_MBX_EMBED
);
13690 reg_congestion_buf
= &mboxq
->u
.mqe
.un
.reg_congestion_buf
;
13691 bf_set(lpfc_mbx_reg_cgn_buf_type
, reg_congestion_buf
, 1);
13693 bf_set(lpfc_mbx_reg_cgn_buf_cnt
, reg_congestion_buf
, 1);
13695 bf_set(lpfc_mbx_reg_cgn_buf_cnt
, reg_congestion_buf
, 0);
13696 reg_congestion_buf
->length
= sizeof(struct lpfc_cgn_info
);
13697 reg_congestion_buf
->addr_lo
=
13698 putPaddrLow(phba
->cgn_i
->phys
);
13699 reg_congestion_buf
->addr_hi
=
13700 putPaddrHigh(phba
->cgn_i
->phys
);
13702 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
13703 shdr
= (union lpfc_sli4_cfg_shdr
*)
13704 &mboxq
->u
.mqe
.un
.sli4_config
.header
.cfg_shdr
;
13705 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
13706 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
,
13708 mempool_free(mboxq
, phba
->mbox_mem_pool
);
13709 if (shdr_status
|| shdr_add_status
|| rc
) {
13710 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
13711 "2642 REG_CONGESTION_BUF mailbox "
13712 "failed with status x%x add_status x%x,"
13713 " mbx status x%x reg %d\n",
13714 shdr_status
, shdr_add_status
, rc
, reg
);
int
lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
{
	lpfc_cmf_stop(phba);
	return __lpfc_reg_congestion_buf(phba, 0);
}

int
lpfc_reg_congestion_buf(struct lpfc_hba *phba)
{
	return __lpfc_reg_congestion_buf(phba, 1);
}
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	bool exp_wqcq_pages = true;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings.  The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
					   mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
	sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters);

	/* Check for Extended Pre-Registered SGL support */
	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);

	/* Check for firmware nvme support */
	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
	      bf_get(cfg_xib, mbx_sli4_parameters));

	if (rc) {
		/* Save this to indicate the Firmware supports NVME */
		sli4_params->nvme = 1;

		/* Firmware NVME support, check driver FC4 NVME support */
		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
					"6133 Disabling NVME support: "
					"FC4 type not supported: x%x\n",
					phba->cfg_enable_fc4_type);
			goto fcponly;
		}
	} else {
		/* No firmware NVME support, check driver FC4 NVME support */
		sli4_params->nvme = 0;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
					"6101 Disabling NVME support: Not "
					"supported by firmware (%d %d) x%x\n",
					bf_get(cfg_nvme, mbx_sli4_parameters),
					bf_get(cfg_xib, mbx_sli4_parameters),
					phba->cfg_enable_fc4_type);
fcponly:
			phba->nvmet_support = 0;
			phba->cfg_nvmet_mrq = 0;
			phba->cfg_nvme_seg_cnt = 0;

			/* If no FC4 type support, move to just SCSI support */
			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
				return -ENODEV;
			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		}
	}

	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
	 * accommodate 512K and 1M IOs in a single nvme buf.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;

	/* Enable embedded Payload BDE if support is indicated */
	if (bf_get(cfg_pbde, mbx_sli4_parameters))
		phba->cfg_enable_pbde = 1;
	else
		phba->cfg_enable_pbde = 0;

	/*
	 * To support Suppress Response feature we must satisfy 3 conditions.
	 * lpfc_suppress_rsp module parameter must be set (default).
	 * In SLI4-Parameters Descriptor:
	 * Extended Inline Buffers (XIB) must be supported.
	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
	 * (double negative).
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, sli4_params->nvme,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
	     LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;
	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	/*
	 * Check if the SLI port supports NSLER
	 */
	if (bf_get(cfg_nsler, mbx_sli4_parameters))
		phba->nsler = 1;
	else
		phba->nsler = 0;

	return 0;
}
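
/*
 * Note: lpfc_get_sli4_parameters() issues the mailbox in polled mode when
 * interrupts are not yet enabled (early in probe) and as a blocking wait
 * otherwise, then caches the returned capabilities in
 * phba->sli4_hba.pc_sli4_params. Later sizing and feature decisions in this
 * file consume those cached values rather than re-reading the port.
 */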
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver can support this
 * kind of device. If the match is successful, the driver core invokes this
 * routine. If this routine determines it can claim the HBA, it does all the
 * initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */

	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;

	set_bit(FC_UNLOADING, &vport->load_flag);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Clean up all nodes, mailboxes and IOs. */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bring the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for the
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during resume() method call, the driver will
 * set device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	return 0;
}
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);
	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);
	lpfc_sli4_prep_dev_for_reset(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
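
/*
 * Note: for PCI error recovery the kernel invokes the driver's callbacks in
 * sequence: .error_detected (above), then .slot_reset once the slot has been
 * reset when PCI_ERS_RESULT_NEED_RESET was returned, and finally .resume.
 * The SLI-3 variants of those later stages follow below.
 */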
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this
 * device again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}

/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT + NVMET IOCBs to reserve.
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}
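
/*
 * Note: the ELS/CT reservation above scales with the max_xri resource
 * reported by the port, so larger XRI configurations set aside
 * proportionally more IOCBs; when NVMET is enabled LPFC_NVMET_BUF_POST is
 * added on top of that count.
 */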
static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	int rc;
	u8 sli_family;

	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
	/* Three cases: (1) FW was not supported on the detected adapter.
	 * (2) FW update has been locked out administratively.
	 * (3) Some other error during FW update.
	 * In each case, an unmaskable message is written to the console
	 * for admin diagnosis.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
	     magic_number != MAGIC_NUMBER_G7) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
	     magic_number != MAGIC_NUMBER_G7P)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure passed as completion
 *           context.
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
			     "3023 Updating Firmware, Current Version:%s "
			     "New Version:%s\n",
			     fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				rc = lpfc_log_write_firmware_error(phba, offset,
								   magic_number,
								   ftype, fid,
								   fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
			     "3029 Skipped Firmware update, Current "
			     "Version:%s New Version:%s\n",
			     fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	if (rc < 0)
		lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI,
			     "3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
			     "3024 Firmware update success: size %d.\n", rc);
}
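
/*
 * Note: lpfc_write_firmware() walks the image in SLI4_PAGE_SIZE chunks,
 * staging up to LPFC_MBX_WR_CONFIG_MAX_BDE DMA buffers per lpfc_wr_object()
 * call; the port returns the next write offset through the &offset argument,
 * so the outer loop repeats until the whole image has been written or an
 * add-status error is reported and decoded by
 * lpfc_log_write_firmware_error().
 */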
/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware to update.
 *
 * This routine is called to perform Linux generic firmware upgrade on device
 * that supports such feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	char file_name[ELX_FW_NAME_SIZE] = {0};
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
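
/*
 * Note: the firmware file is looked up by model name as "<ModelName>.grp"
 * through the kernel firmware loader. INT_FW_UPGRADE uses the asynchronous
 * request_firmware_nowait() path, with the completion running in
 * lpfc_write_firmware(); RUN_FW_UPGRADE blocks in request_firmware() and then
 * writes the image synchronously.
 */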
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver can support this
 * kind of device. If the match is successful, the driver core invokes this
 * routine. If this routine determines it can claim the HBA, it does all the
 * initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	INIT_LIST_HEAD(&phba->poll_list);

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	spin_lock_init(&phba->rrq_list_lock);
	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);

	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* NVME support in FW earlier in the driver load corrects the
	 * FC4 type making a check for nvme_support unnecessary.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized.  If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
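
/*
 * Note: when MSI-X is not available the probe path above falls back to a
 * single interrupt vector, so it also collapses the driver to one EQ/IRQ
 * channel (cfg_irq_chann = 1) and a single NVMET MRQ before checking CPU
 * affinity for the remaining vector.
 */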
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	set_bit(FC_UNLOADING, &vport->load_flag);
	if (phba->cgn_i)
		lpfc_unreg_congestion_buf(phba);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
	 * localports are destroyed after to cleanup all transport memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and bring
 * the device offline. Note that as the driver implements the minimum PM
 * requirements to a power-aware driver's PM support for suspend/resume -- all
 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call will be treated as SUSPEND and the driver will fully
 * reinitialize its device during resume() method call, the driver will set
 * device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	int offline = pci_channel_offline(phba->pcidev);

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset offline"
			" %d\n", offline);

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* HBA_PCI_ERR was set in io_error_detect */
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	lpfc_sli4_queue_destroy(phba);
	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	bool hba_pci_err;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Fatal error, prepare for slot reset */
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2832 Already handling PCI error "
					"state: x%x\n", state);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;
	bool hba_pci_err;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
	if (!hba_pci_err)
		dev_info(&pdev->dev,
			 "hba_pci_err was not set, recovering slot reset.\n");
	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this
 * device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to the
 * proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
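
/*
 * Note: the dispatch above reads the SLI_INTF register from PCI config
 * space; a register marked valid with an SLI-4 revision routes the device to
 * the SLI-4 probe path, while a readable register that is not valid or not
 * SLI-4 falls back to the SLI-3 path.
 */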
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	if (phba->link_state == LPFC_HBA_ERROR &&
	    test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
		return PCI_ERS_RESULT_NEED_RESET;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
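/*
 * The three routines above implement the kernel's PCI error recovery sequence
 * for this driver: error_detected() is called first when a bus error is
 * reported, slot_reset() runs after the slot has been reset so the card can be
 * reinitialized from scratch, and resume() is called once recovery completes
 * and normal I/O may restart.
 */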
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}
/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS is supported by the adapter. Check the
 * function through which RAS support enablement is to be done.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
	/* if ASIC_GEN_NUM >= 0xC */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_6) ||
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_FAMILY_G6)) {
		phba->ras_fwlog.ras_hwsupport = true;
		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
		    phba->cfg_ras_fwlog_buffsize)
			phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
	} else {
		phba->ras_fwlog.ras_hwsupport = false;
	}
}
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
			 lpfc_pci_suspend_one,
			 lpfc_pci_resume_one);

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.driver.pm	= &lpfc_pci_pm_ops_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
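/*
 * The lpfcmgmt misc device registered above only fills in .owner; it exposes
 * a character-device node (/dev/lpfcmgmt) with no ioctl interface, presumably
 * so user-space management tools can detect that the driver is loaded.
 */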
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	pr_info(LPFC_MODULE_DESC "\n");
	pr_info(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	error = -ENOMEM;
	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
	lpfc_wqe_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

	lpfc_pldv_detect = false;

	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);

	return error;
}
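/*
 * The error labels in lpfc_init() above unwind in reverse order of setup:
 * a pci_register_driver() failure removes the cpuhp multi-state, a cpuhp
 * setup failure releases both FC transport templates, and any earlier
 * failure only deregisters the lpfcmgmt misc device.
 */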
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
	unsigned int start_idx;
	unsigned int dbg_cnt;
	unsigned int temp_idx;
	int i;
	int j = 0;
	unsigned long rem_nsec;

	/* Only one dumper at a time; bail out if a dump is already running */
	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;

	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
	if (!dbg_cnt)
		goto out;
	temp_idx = start_idx;
	if (dbg_cnt >= DBG_LOG_SZ) {
		dbg_cnt = DBG_LOG_SZ;
		temp_idx -= 1;
	} else {
		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
		} else {
			if (start_idx < dbg_cnt)
				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
			else
				start_idx -= dbg_cnt;
		}
	}
	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
		 start_idx, temp_idx, dbg_cnt);

	for (i = 0; i < dbg_cnt; i++) {
		if ((start_idx + i) < DBG_LOG_SZ)
			temp_idx = (start_idx + i) % DBG_LOG_SZ;
		else
			temp_idx = j++;
		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
	}
out:
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}
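/*
 * lpfc_dmp_dbg() above treats phba->dbg_log[] as a ring of DBG_LOG_SZ entries:
 * dbg_log_idx is the next slot to be written, so the dump walks back from that
 * index by dbg_log_cnt entries (capped at DBG_LOG_SZ), wraps around the end of
 * the array, and prints each entry with its timestamp split into seconds and
 * microseconds.
 */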
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
	struct va_format vaf;

	va_start(args, fmt);
	if (unlikely(dbg_dmping)) {
		/* A dump is in progress; print directly instead of buffering */
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
	} else {
		idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
			DBG_LOG_SZ;

		atomic_inc(&phba->dbg_log_cnt);

		vscnprintf(phba->dbg_log[idx].log,
			   sizeof(phba->dbg_log[idx].log), fmt, args);

		phba->dbg_log[idx].t_ns = local_clock();
	}
	va_end(args);
}
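/*
 * Usage sketch (illustrative only; the call site and message text below are
 * hypothetical, not taken from the driver):
 *
 *	lpfc_dbg_print(phba, "io tag x%x completed\n", iotag);
 *	...
 *	lpfc_dmp_dbg(phba);	flush the buffered entries to the kernel log
 *
 * While a dump is in progress (dbg_log_dmping set), new messages bypass the
 * ring buffer and go straight to the kernel log.
 */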
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);