/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_target.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/delay.h>
static int qla24xx_vport_disable(struct fc_vport *, bool);

/* SYSFS attributes --------------------------------------------------------- */
qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (!(ha->fw_dump_reading || ha->mctp_dump_reading))

    if (IS_P3P_TYPE(ha)) {
        if (off < ha->md_template_size) {
            rval = memory_read_from_buffer(buf, count,
                &off, ha->md_tmplt_hdr, ha->md_template_size);

        off -= ha->md_template_size;
        rval = memory_read_from_buffer(buf, count,
            &off, ha->md_dump, ha->md_dump_size);
    } else if (ha->mctp_dumped && ha->mctp_dump_reading)
        return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
    else if (ha->fw_dump_reading)
        return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
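
/*
 * The 'fw_dump' write handler below interprets the value written (parsed
 * with simple_strtol()) as a command: clear a dump that has already been
 * read, mark a completed dump readable, trigger firmware dump generation
 * (via qla2x00_system_error() or an ISP abort), or manage the MCTP dump
 * state.  This summary is inferred from the branches visible below.
 */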
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    reading = simple_strtol(buf, NULL, 10);

    if (!ha->fw_dump_reading)

    ql_log(ql_log_info, vha, 0x705d,
        "Firmware dump cleared on (%ld).\n", vha->host_no);

    if (IS_P3P_TYPE(ha)) {

    ha->fw_dump_reading = 0;

    if (ha->fw_dumped && !ha->fw_dump_reading) {
        ha->fw_dump_reading = 1;

        ql_log(ql_log_info, vha, 0x705e,
            "Raw firmware dump ready for read on (%ld).\n",

    qla2x00_alloc_fw_dump(vha);

    qla82xx_set_reset_owner(vha);
    qla82xx_idc_unlock(ha);
    } else if (IS_QLA8044(ha)) {
        qla82xx_set_reset_owner(vha);
        qla8044_idc_unlock(ha);

    qla2x00_system_error(vha);

    if (IS_P3P_TYPE(ha)) {
        if (ha->md_tmplt_hdr)
            ql_dbg(ql_dbg_user, vha, 0x705b,
                "MiniDump supported with this firmware.\n");
            ql_dbg(ql_dbg_user, vha, 0x709d,
                "MiniDump not supported with this firmware.\n");

    set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);

    if (!ha->mctp_dump_reading)

    ql_log(ql_log_info, vha, 0x70c1,
        "MCTP dump cleared on (%ld).\n", vha->host_no);
    ha->mctp_dump_reading = 0;

    if (ha->mctp_dumped && !ha->mctp_dump_reading) {
        ha->mctp_dump_reading = 1;
        ql_log(ql_log_info, vha, 0x70c2,
            "Raw mctp dump ready for read on (%ld).\n",

static struct bin_attribute sysfs_fw_dump_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .read = qla2x00_sysfs_read_fw_dump,
    .write = qla2x00_sysfs_write_fw_dump,
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (!capable(CAP_SYS_ADMIN))

    if (IS_NOCACHE_VPD_TYPE(ha))
        ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,

    return memory_read_from_buffer(buf, count, &off, ha->nvram,
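
/*
 * NVRAM writes below must cover the whole region in one shot (off == 0 and
 * count == ha->nvram_size).  The buffer is re-checksummed (32-bit two's
 * complement for FWI2-capable parts, 8-bit otherwise) before being written,
 * and an ISP abort is scheduled so the new settings take effect.
 */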
qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
        !ha->isp_ops->write_nvram)

    /* Checksum NVRAM. */
    if (IS_FWI2_CAPABLE(ha)) {
        iter = (uint32_t *)buf;

        for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
            chksum += le32_to_cpu(*iter);
        chksum = ~chksum + 1;
        *iter = cpu_to_le32(chksum);

        iter = (uint8_t *)buf;

        for (cnt = 0; cnt < count - 1; cnt++)

        chksum = ~chksum + 1;

    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x705f,
            "HBA not online, failing NVRAM update.\n");

    ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
    ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,

    ql_dbg(ql_dbg_user, vha, 0x7060,
        "Setting ISP_ABORT_NEEDED\n");
    /* NVRAM settings take effect immediately. */
    set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    qla2xxx_wake_dpc(vha);
    qla2x00_wait_for_chip_reset(vha);

static struct bin_attribute sysfs_nvram_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .read = qla2x00_sysfs_read_nvram,
    .write = qla2x00_sysfs_write_nvram,
qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (ha->optrom_state != QLA_SREADING)

    mutex_lock(&ha->optrom_mutex);
    rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
        ha->optrom_region_size);
    mutex_unlock(&ha->optrom_mutex);

qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (ha->optrom_state != QLA_SWRITING)

    if (off > ha->optrom_region_size)

    if (off + count > ha->optrom_region_size)
        count = ha->optrom_region_size - off;

    mutex_lock(&ha->optrom_mutex);
    memcpy(&ha->optrom_buffer[off], buf, count);
    mutex_unlock(&ha->optrom_mutex);

static struct bin_attribute sysfs_optrom_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .read = qla2x00_sysfs_read_optrom,
    .write = qla2x00_sysfs_write_optrom,
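
/*
 * 'optrom_ctl' drives the staged flash-access protocol used by the 'optrom'
 * attribute above: it parses "<action>:<start>:<size>", bounds the region
 * against ha->optrom_size, and depending on the action frees the staging
 * buffer, reads a flash region into it, prepares it for a staged write, or
 * commits the staged data to flash via isp_ops->write_optrom().
 */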
qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;
    uint32_t size = ha->optrom_size;
    ssize_t rval = count;

    if (unlikely(pci_channel_offline(ha->pdev)))

    if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)

    if (start > ha->optrom_size)

    mutex_lock(&ha->optrom_mutex);

    if (ha->optrom_state != QLA_SREADING &&
        ha->optrom_state != QLA_SWRITING) {

    ha->optrom_state = QLA_SWAITING;

    ql_dbg(ql_dbg_user, vha, 0x7061,
        "Freeing flash region allocation -- 0x%x bytes.\n",
        ha->optrom_region_size);

    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;

    if (ha->optrom_state != QLA_SWAITING) {

    ha->optrom_region_start = start;
    ha->optrom_region_size = start + size > ha->optrom_size ?
        ha->optrom_size - start : size;

    ha->optrom_state = QLA_SREADING;
    ha->optrom_buffer = vmalloc(ha->optrom_region_size);
    if (ha->optrom_buffer == NULL) {
        ql_log(ql_log_warn, vha, 0x7062,
            "Unable to allocate memory for optrom retrieval "
            "(%x).\n", ha->optrom_region_size);

        ha->optrom_state = QLA_SWAITING;

    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7063,
            "HBA not online, failing NVRAM update.\n");

    ql_dbg(ql_dbg_user, vha, 0x7064,
        "Reading flash region -- 0x%x/0x%x.\n",
        ha->optrom_region_start, ha->optrom_region_size);

    memset(ha->optrom_buffer, 0, ha->optrom_region_size);
    ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    if (ha->optrom_state != QLA_SWAITING) {

    /*
     * We need to be more restrictive on which FLASH regions are
     * allowed to be updated via user-space. Regions accessible
     * via this method include:
     *
     * ISP21xx/ISP22xx/ISP23xx type boards:
     *
     *        0x000000 -> 0x020000 -- Boot code.
     *
     * ISP2322/ISP24xx type boards:
     *
     *        0x000000 -> 0x07ffff -- Boot code.
     *        0x080000 -> 0x0fffff -- Firmware.
     *
     * ISP25xx type boards:
     *
     *        0x000000 -> 0x07ffff -- Boot code.
     *        0x080000 -> 0x0fffff -- Firmware.
     *        0x120000 -> 0x12ffff -- VPD and HBA parameters.
     */
    if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
    else if (start == (ha->flt_region_boot * 4) ||
        start == (ha->flt_region_fw * 4))
    else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
        || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)

        ql_log(ql_log_warn, vha, 0x7065,
            "Invalid start region 0x%x/0x%x.\n", start, size);

    ha->optrom_region_start = start;
    ha->optrom_region_size = start + size > ha->optrom_size ?
        ha->optrom_size - start : size;

    ha->optrom_state = QLA_SWRITING;
    ha->optrom_buffer = vmalloc(ha->optrom_region_size);
    if (ha->optrom_buffer == NULL) {
        ql_log(ql_log_warn, vha, 0x7066,
            "Unable to allocate memory for optrom update "
            "(%x)\n", ha->optrom_region_size);

        ha->optrom_state = QLA_SWAITING;

    ql_dbg(ql_dbg_user, vha, 0x7067,
        "Staging flash region write -- 0x%x/0x%x.\n",
        ha->optrom_region_start, ha->optrom_region_size);

    memset(ha->optrom_buffer, 0, ha->optrom_region_size);

    if (ha->optrom_state != QLA_SWRITING) {

    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7068,
            "HBA not online, failing flash update.\n");

    ql_dbg(ql_dbg_user, vha, 0x7069,
        "Writing flash region -- 0x%x/0x%x.\n",
        ha->optrom_region_start, ha->optrom_region_size);

    ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    mutex_unlock(&ha->optrom_mutex);

static struct bin_attribute sysfs_optrom_ctl_attr = {
    .name = "optrom_ctl",
    .write = qla2x00_sysfs_write_optrom_ctl,
qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev)))

    if (!capable(CAP_SYS_ADMIN))

    if (IS_NOCACHE_VPD_TYPE(ha)) {
        faddr = ha->flt_region_vpd << 2;

        if (IS_QLA27XX(ha) &&
            qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
            faddr = ha->flt_region_vpd_sec << 2;

        ha->isp_ops->read_optrom(vha, ha->vpd, faddr,

    return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev)))

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
        !ha->isp_ops->write_nvram)

    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x706a,
            "HBA not online, failing VPD update.\n");

    ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
    ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);

    /* Update flash version information for 4Gb & above. */
    if (!IS_FWI2_CAPABLE(ha))

    tmp_data = vmalloc(256);

        ql_log(ql_log_warn, vha, 0x706b,
            "Unable to allocate memory for VPD information update.\n");

    ha->isp_ops->get_flash_version(vha, tmp_data);

static struct bin_attribute sysfs_vpd_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .read = qla2x00_sysfs_read_vpd,
    .write = qla2x00_sysfs_write_vpd,
qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;
    uint16_t iter, addr, offset;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)

    ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,

        ql_log(ql_log_warn, vha, 0x706c,
            "Unable to allocate memory for SFP read-data.\n");

    memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);

    for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
        iter++, offset += SFP_BLOCK_SIZE) {

        /* Skip to next device address. */

        rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
            addr, offset, SFP_BLOCK_SIZE, BIT_1);
        if (rval != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x706d,
                "Unable to read SFP data (%x/%x/%x).\n", rval,

        memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
        buf += SFP_BLOCK_SIZE;

static struct bin_attribute sysfs_sfp_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .size = SFP_DEV_SIZE * 2,
    .read = qla2x00_sysfs_read_sfp,
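
/*
 * The 'reset' attribute accepts a small integer selecting the reset type;
 * the branches below cover a full ISP reset, an MPI (management processor)
 * reset on 81xx/83xx parts, an FCoE context reset on P3P adapters, enabling
 * or disabling reset via IDC control, and a flash-version cache refresh
 * without any reset.
 */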
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    uint32_t idc_control;
    uint8_t *tmp_data = NULL;

    type = simple_strtol(buf, NULL, 10);

    ql_log(ql_log_info, vha, 0x706e,
        "Issuing ISP reset.\n");

    scsi_block_requests(vha->host);
    if (IS_QLA82XX(ha)) {
        ha->flags.isp82xx_no_md_cap = 1;
        qla82xx_idc_lock(ha);
        qla82xx_set_reset_owner(vha);
        qla82xx_idc_unlock(ha);
    } else if (IS_QLA8044(ha)) {
        qla8044_idc_lock(ha);
        idc_control = qla8044_rd_reg(ha,
            QLA8044_IDC_DRV_CTRL);
        qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
            (idc_control | GRACEFUL_RESET_BIT1));
        qla82xx_set_reset_owner(vha);
        qla8044_idc_unlock(ha);

    set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    qla2xxx_wake_dpc(vha);

    qla2x00_wait_for_chip_reset(vha);
    scsi_unblock_requests(vha->host);

    if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))

    ql_log(ql_log_info, vha, 0x706f,
        "Issuing MPI reset.\n");

    if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
        uint32_t idc_control;

        qla83xx_idc_lock(vha, 0);
        __qla83xx_get_idc_control(vha, &idc_control);
        idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
        __qla83xx_set_idc_control(vha, idc_control);
        qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
            QLA8XXX_DEV_NEED_RESET);
        qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
        qla83xx_idc_unlock(vha, 0);

    /* Make sure FC side is not in reset */
    qla2x00_wait_for_hba_online(vha);

    /* Issue MPI reset */
    scsi_block_requests(vha->host);
    if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
        ql_log(ql_log_warn, vha, 0x7070,
            "MPI reset failed.\n");
    scsi_unblock_requests(vha->host);

    if (!IS_P3P_TYPE(ha) || vha != base_vha) {
        ql_log(ql_log_info, vha, 0x7071,
            "FCoE ctx reset not supported.\n");

    ql_log(ql_log_info, vha, 0x7072,
        "Issuing FCoE ctx reset.\n");
    set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
    qla2xxx_wake_dpc(vha);
    qla2x00_wait_for_fcoe_ctx_reset(vha);

    ql_log(ql_log_info, vha, 0x70bc,
        "Disabling Reset by IDC control\n");
    qla83xx_idc_lock(vha, 0);
    __qla83xx_get_idc_control(vha, &idc_control);
    idc_control |= QLA83XX_IDC_RESET_DISABLED;
    __qla83xx_set_idc_control(vha, idc_control);
    qla83xx_idc_unlock(vha, 0);

    ql_log(ql_log_info, vha, 0x70bd,
        "Enabling Reset by IDC control\n");
    qla83xx_idc_lock(vha, 0);
    __qla83xx_get_idc_control(vha, &idc_control);
    idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
    __qla83xx_set_idc_control(vha, idc_control);
    qla83xx_idc_unlock(vha, 0);

    ql_dbg(ql_dbg_user, vha, 0x70e0,
        "Updating cache versions without reset ");

    tmp_data = vmalloc(256);

        ql_log(ql_log_warn, vha, 0x70e1,
            "Unable to allocate memory for VPD information update.\n");

    ha->isp_ops->get_flash_version(vha, tmp_data);

static struct bin_attribute sysfs_reset_attr = {
    .write = qla2x00_sysfs_write_reset,
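
/*
 * 'issue_logo' takes a 24-bit port ID encoded in the written integer
 * (domain in bits 23:16, area in bits 15:8, AL_PA in bits 7:0) and sends an
 * explicit LOGO ELS to that port through qla24xx_els_dcmd_iocb().
 */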
qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));

    type = simple_strtol(buf, NULL, 10);

    did.b.domain = (type & 0x00ff0000) >> 16;
    did.b.area = (type & 0x0000ff00) >> 8;
    did.b.al_pa = (type & 0x000000ff);

    ql_log(ql_log_info, vha, 0x70e3, "portid=%02x%02x%02x done\n",
        did.b.domain, did.b.area, did.b.al_pa);

    ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);

    rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);

static struct bin_attribute sysfs_issue_logo_attr = {
    .name = "issue_logo",
    .write = qla2x00_issue_logo,
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;
    uint16_t actual_size;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)

    ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
        &ha->xgmac_data_dma, GFP_KERNEL);
    if (!ha->xgmac_data) {
        ql_log(ql_log_warn, vha, 0x7076,
            "Unable to allocate memory for XGMAC read-data.\n");

    memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);

    rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
        XGMAC_DATA_SIZE, &actual_size);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7077,
            "Unable to read XGMAC data (%x).\n", rval);

    count = actual_size > count ? count : actual_size;
    memcpy(buf, ha->xgmac_data, count);

static struct bin_attribute sysfs_xgmac_stats_attr = {
    .name = "xgmac_stats",
    .read = qla2x00_sysfs_read_xgmac_stats,
qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)

    ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
        &ha->dcbx_tlv_dma, GFP_KERNEL);

        ql_log(ql_log_warn, vha, 0x7078,
            "Unable to allocate memory for DCBX TLV read-data.\n");

    memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

    rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,

    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7079,
            "Unable to read DCBX TLV (%x).\n", rval);

    memcpy(buf, ha->dcbx_tlv, count);

static struct bin_attribute sysfs_dcbx_tlv_attr = {
    .read = qla2x00_sysfs_read_dcbx_tlv,
static struct sysfs_entry {
    struct bin_attribute *attr;
} bin_file_entries[] = {
    { "fw_dump", &sysfs_fw_dump_attr, },
    { "nvram", &sysfs_nvram_attr, },
    { "optrom", &sysfs_optrom_attr, },
    { "optrom_ctl", &sysfs_optrom_ctl_attr, },
    { "vpd", &sysfs_vpd_attr, 1 },
    { "sfp", &sysfs_sfp_attr, 1 },
    { "reset", &sysfs_reset_attr, },
    { "issue_logo", &sysfs_issue_logo_attr, },
    { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
    { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
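
/*
 * The numeric flag in each entry (the is4GBp_only field tested by the
 * alloc/free helpers below) restricts where an attribute is created:
 * any non-zero value requires an FWI2-capable (4Gb+) ISP, 2 further
 * limits it to QLA25xx, 3 to CNA-capable adapters, and 0x27 to QLA27xx.
 */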
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
    struct Scsi_Host *host = vha->host;
    struct sysfs_entry *iter;

    for (iter = bin_file_entries; iter->name; iter++) {
        if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
        if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
        if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))

        ret = sysfs_create_bin_file(&host->shost_gendev.kobj,

            ql_log(ql_log_warn, vha, 0x00f3,
                "Unable to create sysfs %s binary attribute (%d).\n",

            ql_dbg(ql_dbg_init, vha, 0x00f4,
                "Successfully created sysfs %s binary attribute.\n",

qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
    struct Scsi_Host *host = vha->host;
    struct sysfs_entry *iter;
    struct qla_hw_data *ha = vha->hw;

    for (iter = bin_file_entries; iter->name; iter++) {
        if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
        if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
        if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
        if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))

        sysfs_remove_bin_file(&host->shost_gendev.kobj,

    if (stop_beacon && ha->beacon_blink_led == 1)
        ha->isp_ops->beacon_off(vha);
/* Scsi_Host attributes. */

qla2x00_drvr_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);

qla2x00_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    return scnprintf(buf, PAGE_SIZE, "%s\n",
        ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));

qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (IS_QLAFX00(vha->hw)) {
        return scnprintf(buf, PAGE_SIZE, "%s\n",
            vha->hw->mr.serial_num);
    } else if (IS_FWI2_CAPABLE(ha)) {
        qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
        return strlen(strcat(buf, "\n"));

    sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
    return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,

qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);

qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (IS_QLAFX00(vha->hw))
        return scnprintf(buf, PAGE_SIZE, "%s\n",
            vha->hw->mr.hw_version);

    return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
        ha->product_id[0], ha->product_id[1], ha->product_id[2],
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);

qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);

qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    return scnprintf(buf, PAGE_SIZE, "%s\n",
        vha->hw->isp_ops->pci_info_str(vha, pci_info));

qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
        atomic_read(&vha->loop_state) == LOOP_DEAD ||
        vha->device_flags & DFLG_NO_CABLE)
        len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
    else if (atomic_read(&vha->loop_state) != LOOP_READY ||
        qla2x00_reset_active(vha))
        len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");

        len = scnprintf(buf, PAGE_SIZE, "Link Up - ");

    switch (ha->current_topology) {
        len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
        len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
        len += scnprintf(buf + len, PAGE_SIZE-len,
            "N_Port to N_Port\n");
        len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
        len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    switch (vha->hw->zio_mode) {
    case QLA_ZIO_MODE_6:
        len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
    case QLA_ZIO_DISABLED:
        len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");

qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_ZIO_SUPPORTED(ha))

    if (sscanf(buf, "%d", &val) != 1)

        zio_mode = QLA_ZIO_MODE_6;
        zio_mode = QLA_ZIO_DISABLED;

    /* Update per-hba values and queue a reset. */
    if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
        ha->zio_mode = zio_mode;
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);

qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);

qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (sscanf(buf, "%d", &val) != 1)
    if (val > 25500 || val < 100)

    zio_timer = (uint16_t)(val / 100);
    vha->hw->zio_timer = zio_timer;

qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (vha->hw->beacon_blink_led)
        len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
        len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");

qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (IS_QLA2100(ha) || IS_QLA2200(ha))

    if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
        ql_log(ql_log_warn, vha, 0x707a,
            "Abort ISP active -- ignoring beacon request.\n");

    if (sscanf(buf, "%d", &val) != 1)

        rval = ha->isp_ops->beacon_on(vha);
        rval = ha->isp_ops->beacon_off(vha);

    if (rval != QLA_SUCCESS)
qla2x00_optrom_bios_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
        ha->bios_revision[0]);

qla2x00_optrom_efi_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
        ha->efi_revision[0]);

qla2x00_optrom_fcode_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
        ha->fcode_revision[0]);

qla2x00_optrom_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
        ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
        ha->fw_revision[3]);

qla2x00_optrom_gold_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
        return scnprintf(buf, PAGE_SIZE, "\n");

    return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
        ha->gold_fw_version[0], ha->gold_fw_version[1],
        ha->gold_fw_version[2], ha->gold_fw_version[3]);
qla2x00_total_isp_aborts_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    return scnprintf(buf, PAGE_SIZE, "%d\n",
        vha->qla_stats.total_isp_aborts);

qla24xx_84xx_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    int rval = QLA_SUCCESS;
    uint16_t status[2] = {0, 0};
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA84XX(ha))
        return scnprintf(buf, PAGE_SIZE, "\n");

    if (ha->cs84xx->op_fw_version == 0)
        rval = qla84xx_verify_chip(vha, status);

    if ((rval == QLA_SUCCESS) && (status[0] == 0))
        return scnprintf(buf, PAGE_SIZE, "%u\n",
            (uint32_t)ha->cs84xx->op_fw_version);

    return scnprintf(buf, PAGE_SIZE, "\n");

qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
        return scnprintf(buf, PAGE_SIZE, "\n");

    return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
        ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
        ha->mpi_capabilities);

qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
        return scnprintf(buf, PAGE_SIZE, "\n");

    return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
        ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
qla2x00_flash_block_size_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);

qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (!IS_CNA_CAPABLE(vha->hw))
        return scnprintf(buf, PAGE_SIZE, "\n");

    return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);

qla2x00_vn_port_mac_address_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (!IS_CNA_CAPABLE(vha->hw))
        return scnprintf(buf, PAGE_SIZE, "\n");

    return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);

qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);

qla2x00_thermal_temp_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (qla2x00_reset_active(vha)) {
        ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");

    if (vha->hw->flags.eeh_busy) {
        ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");

    if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
        return scnprintf(buf, PAGE_SIZE, "%d\n", temp);

    return scnprintf(buf, PAGE_SIZE, "\n");

qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    int rval = QLA_FUNCTION_FAILED;

    if (IS_QLAFX00(vha->hw)) {
        pstate = qlafx00_fw_state_show(dev, attr, buf);
        return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);

    if (qla2x00_reset_active(vha))
        ql_log(ql_log_warn, vha, 0x707c,
            "ISP reset active.\n");
    else if (!vha->hw->flags.eeh_busy)
        rval = qla2x00_get_firmware_state(vha, state);
    if (rval != QLA_SUCCESS)
        memset(state, -1, sizeof(state));

    return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
        state[0], state[1], state[2], state[3], state[4], state[5]);
qla2x00_diag_requests_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (!IS_BIDI_CAPABLE(vha->hw))
        return scnprintf(buf, PAGE_SIZE, "\n");

    return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);

qla2x00_diag_megabytes_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (!IS_BIDI_CAPABLE(vha->hw))
        return scnprintf(buf, PAGE_SIZE, "\n");

    return scnprintf(buf, PAGE_SIZE, "%llu\n",
        vha->bidi_stats.transfer_bytes >> 20);

qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    else if (IS_P3P_TYPE(ha))
        size = ha->md_template_size + ha->md_dump_size;
        size = ha->fw_dump_len;

    return scnprintf(buf, PAGE_SIZE, "%d\n", size);

qla2x00_allow_cna_fw_dump_show(struct device *dev,
    struct device_attribute *attr, char *buf)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (!IS_P3P_TYPE(vha->hw))
        return scnprintf(buf, PAGE_SIZE, "\n");

    return scnprintf(buf, PAGE_SIZE, "%s\n",
        vha->hw->allow_cna_fw_dump ? "true" : "false");

qla2x00_allow_cna_fw_dump_store(struct device *dev,
    struct device_attribute *attr, const char *buf, size_t count)
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (!IS_P3P_TYPE(vha->hw))

    if (sscanf(buf, "%d", &val) != 1)

    vha->hw->allow_cna_fw_dump = val != 0;

qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA27XX(ha))
        return scnprintf(buf, PAGE_SIZE, "\n");

    return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
        ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
    qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
    qla2x00_beacon_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
    qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
    qla2x00_optrom_efi_version_show, NULL);
static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
    qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
    qla2x00_optrom_gold_fw_version_show, NULL);
static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
    qla2x00_vn_port_mac_address_show, NULL);
static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
    qla2x00_allow_cna_fw_dump_show,
    qla2x00_allow_cna_fw_dump_store);
static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
struct device_attribute *qla2x00_host_attrs[] = {
    &dev_attr_driver_version,
    &dev_attr_fw_version,
    &dev_attr_serial_num,
    &dev_attr_model_name,
    &dev_attr_model_desc,
    &dev_attr_link_state,
    &dev_attr_zio_timer,
    &dev_attr_optrom_bios_version,
    &dev_attr_optrom_efi_version,
    &dev_attr_optrom_fcode_version,
    &dev_attr_optrom_fw_version,
    &dev_attr_84xx_fw_version,
    &dev_attr_total_isp_aborts,
    &dev_attr_mpi_version,
    &dev_attr_phy_version,
    &dev_attr_flash_block_size,
    &dev_attr_vn_port_mac_address,
    &dev_attr_fabric_param,
    &dev_attr_optrom_gold_fw_version,
    &dev_attr_thermal_temp,
    &dev_attr_diag_requests,
    &dev_attr_diag_megabytes,
    &dev_attr_fw_dump_size,
    &dev_attr_allow_cna_fw_dump,
    &dev_attr_pep_version,

/* Host attributes. */
qla2x00_get_host_port_id(struct Scsi_Host *shost)
    scsi_qla_host_t *vha = shost_priv(shost);

    fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
        vha->d_id.b.area << 8 | vha->d_id.b.al_pa;

qla2x00_get_host_speed(struct Scsi_Host *shost)
    struct qla_hw_data *ha = ((struct scsi_qla_host *)
        (shost_priv(shost)))->hw;
    u32 speed = FC_PORTSPEED_UNKNOWN;

    if (IS_QLAFX00(ha)) {
        qlafx00_get_host_speed(shost);

    switch (ha->link_data_rate) {
    case PORT_SPEED_1GB:
        speed = FC_PORTSPEED_1GBIT;
    case PORT_SPEED_2GB:
        speed = FC_PORTSPEED_2GBIT;
    case PORT_SPEED_4GB:
        speed = FC_PORTSPEED_4GBIT;
    case PORT_SPEED_8GB:
        speed = FC_PORTSPEED_8GBIT;
    case PORT_SPEED_10GB:
        speed = FC_PORTSPEED_10GBIT;
    case PORT_SPEED_16GB:
        speed = FC_PORTSPEED_16GBIT;
    case PORT_SPEED_32GB:
        speed = FC_PORTSPEED_32GBIT;

    fc_host_speed(shost) = speed;

qla2x00_get_host_port_type(struct Scsi_Host *shost)
    scsi_qla_host_t *vha = shost_priv(shost);
    uint32_t port_type = FC_PORTTYPE_UNKNOWN;

        fc_host_port_type(shost) = FC_PORTTYPE_NPIV;

    switch (vha->hw->current_topology) {
        port_type = FC_PORTTYPE_LPORT;
        port_type = FC_PORTTYPE_NLPORT;
        port_type = FC_PORTTYPE_PTP;
        port_type = FC_PORTTYPE_NPORT;

    fc_host_port_type(shost) = port_type;
qla2x00_get_starget_node_name(struct scsi_target *starget)
    struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
    scsi_qla_host_t *vha = shost_priv(host);

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->rport &&
            starget->id == fcport->rport->scsi_target_id) {
            node_name = wwn_to_u64(fcport->node_name);

    fc_starget_node_name(starget) = node_name;

qla2x00_get_starget_port_name(struct scsi_target *starget)
    struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
    scsi_qla_host_t *vha = shost_priv(host);

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->rport &&
            starget->id == fcport->rport->scsi_target_id) {
            port_name = wwn_to_u64(fcport->port_name);

    fc_starget_port_name(starget) = port_name;

qla2x00_get_starget_port_id(struct scsi_target *starget)
    struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
    scsi_qla_host_t *vha = shost_priv(host);
    uint32_t port_id = ~0U;

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->rport &&
            starget->id == fcport->rport->scsi_target_id) {
            port_id = fcport->d_id.b.domain << 16 |
                fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;

    fc_starget_port_id(starget) = port_id;
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
        rport->dev_loss_tmo = timeout;
        rport->dev_loss_tmo = 1;

qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
    struct Scsi_Host *host = rport_to_shost(rport);
    fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
    unsigned long flags;

    /* Now that the rport has been deleted, set the fcport state to
    qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);

    /*
     * Transport has effectively 'deleted' the rport, clear
     * all local references.
     */
    spin_lock_irqsave(host->host_lock, flags);
    fcport->rport = fcport->drport = NULL;
    *((fc_port_t **)rport->dd_data) = NULL;
    spin_unlock_irqrestore(host->host_lock, flags);

    if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))

    if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
        qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);

qla2x00_terminate_rport_io(struct fc_rport *rport)
    fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

    if (test_bit(UNLOADING, &fcport->vha->dpc_flags))

    if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))

    if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
        qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);

    /*
     * At this point all fcport's software-states are cleared. Perform any
     * final cleanup of firmware resources (PCBs and XCBs).
     */
    if (fcport->loop_id != FC_NO_LOOP_ID) {
        if (IS_FWI2_CAPABLE(fcport->vha->hw))
            fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
                fcport->loop_id, fcport->d_id.b.domain,
                fcport->d_id.b.area, fcport->d_id.b.al_pa);

            qla2x00_port_logout(fcport->vha, fcport);

qla2x00_issue_lip(struct Scsi_Host *shost)
    scsi_qla_host_t *vha = shost_priv(shost);

    if (IS_QLAFX00(vha->hw))

    qla2x00_loop_reset(vha);
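
/*
 * Host statistics are gathered into a DMA-coherent link_statistics buffer:
 * FWI2-capable ISPs use the qla24xx_get_isp_stats() mailbox command, while
 * older parts fall back to qla2x00_get_link_status() and require the loop
 * to be in the READY state.  The raw counters are then copied into the
 * fc_host_statistics structure the FC transport class expects.
 */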
static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
    scsi_qla_host_t *vha = shost_priv(shost);
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    struct link_statistics *stats;
    dma_addr_t stats_dma;
    struct fc_host_statistics *p = &vha->fc_host_stat;

    memset(p, -1, sizeof(*p));

    if (IS_QLAFX00(vha->hw))
    if (test_bit(UNLOADING, &vha->dpc_flags))
    if (unlikely(pci_channel_offline(ha->pdev)))
    if (qla2x00_reset_active(vha))

    stats = dma_alloc_coherent(&ha->pdev->dev,
        sizeof(*stats), &stats_dma, GFP_KERNEL);

        ql_log(ql_log_warn, vha, 0x707d,
            "Failed to allocate memory for stats.\n");

    memset(stats, 0, sizeof(*stats));

    rval = QLA_FUNCTION_FAILED;
    if (IS_FWI2_CAPABLE(ha)) {
        rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
    } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
        /* Must be in a 'READY' state for statistics retrieval. */
        rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,

    if (rval != QLA_SUCCESS)

    p->link_failure_count = stats->link_fail_cnt;
    p->loss_of_sync_count = stats->loss_sync_cnt;
    p->loss_of_signal_count = stats->loss_sig_cnt;
    p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
    p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
    p->invalid_crc_count = stats->inval_crc_cnt;
    if (IS_FWI2_CAPABLE(ha)) {
        p->lip_count = stats->lip_cnt;
        p->tx_frames = stats->tx_frames;
        p->rx_frames = stats->rx_frames;
        p->dumped_frames = stats->discarded_frames;
        p->nos_count = stats->nos_rcvd;
            stats->dropped_frames + stats->discarded_frames;
        p->rx_words = vha->qla_stats.input_bytes;
        p->tx_words = vha->qla_stats.output_bytes;

    p->fcp_control_requests = vha->qla_stats.control_requests;
    p->fcp_input_requests = vha->qla_stats.input_requests;
    p->fcp_output_requests = vha->qla_stats.output_requests;
    p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
    p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
    p->seconds_since_last_reset =
        get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
    do_div(p->seconds_since_last_reset, HZ);

    dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
qla2x00_reset_host_stats(struct Scsi_Host *shost)
    scsi_qla_host_t *vha = shost_priv(shost);
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    struct link_statistics *stats;
    dma_addr_t stats_dma;

    memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
    memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

    vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();

    if (IS_FWI2_CAPABLE(ha)) {
        stats = dma_alloc_coherent(&ha->pdev->dev,
            sizeof(*stats), &stats_dma, GFP_KERNEL);

            ql_log(ql_log_warn, vha, 0x70d7,
                "Failed to allocate memory for stats.\n");

        /* reset firmware statistics */
        qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);

        dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
    scsi_qla_host_t *vha = shost_priv(shost);

    qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
        sizeof(fc_host_symbolic_name(shost)));

qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
    scsi_qla_host_t *vha = shost_priv(shost);

    set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
    scsi_qla_host_t *vha = shost_priv(shost);
    uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
        0xFF, 0xFF, 0xFF, 0xFF};
    u64 fabric_name = wwn_to_u64(node_name);

    if (vha->device_flags & SWITCH_FOUND)
        fabric_name = wwn_to_u64(vha->fabric_node_name);

    fc_host_fabric_name(shost) = fabric_name;

qla2x00_get_host_port_state(struct Scsi_Host *shost)
    scsi_qla_host_t *vha = shost_priv(shost);
    struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

    if (!base_vha->flags.online) {
        fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;

    switch (atomic_read(&base_vha->loop_state)) {
        fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
        if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
            fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
            fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
        fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
        fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
        fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
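
/*
 * NPIV virtual port creation: sanity-check the request, allocate a new
 * scsi_qla_host via qla24xx_create_vhost(), initialise its loop and error
 * state, inherit DIF/DIX protection settings where the firmware supports
 * them, register the host with the SCSI midlayer, and optionally attach a
 * dedicated request queue (QoS or CPU-affinity configurations).
 */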
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
    scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
    scsi_qla_host_t *vha = NULL;
    struct qla_hw_data *ha = base_vha->hw;
    uint16_t options = 0;
    struct req_que *req = ha->req_q_map[0];

    ret = qla24xx_vport_create_req_sanity_check(fc_vport);

        ql_log(ql_log_warn, vha, 0x707e,
            "Vport sanity check failed, status %x\n", ret);

    vha = qla24xx_create_vhost(fc_vport);

        ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
        return FC_VPORT_FAILED;

    atomic_set(&vha->vp_state, VP_OFFLINE);
    fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
        atomic_set(&vha->vp_state, VP_FAILED);

    /* ready to create vport */
    ql_log(ql_log_info, vha, 0x7080,
        "VP entry id %d assigned.\n", vha->vp_idx);

    /* initialized vport states */
    atomic_set(&vha->loop_state, LOOP_DOWN);
    vha->vp_err_state = VP_ERR_PORTDWN;
    vha->vp_prev_err_state = VP_ERR_UNKWN;
    /* Check if physical ha port is Up */
    if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
        /* Don't retry or attempt login of this virtual port */
        ql_dbg(ql_dbg_user, vha, 0x7081,
            "Vport loop state is not UP.\n");
        atomic_set(&vha->loop_state, LOOP_DEAD);
            fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);

    if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
        if (ha->fw_attributes & BIT_4) {
            int prot = 0, guard;
            vha->flags.difdix_supported = 1;
            ql_dbg(ql_dbg_user, vha, 0x7082,
                "Registered for DIF/DIX type 1 and 3 protection.\n");
            if (ql2xenabledif == 1)
                prot = SHOST_DIX_TYPE0_PROTECTION;
            scsi_host_set_prot(vha->host,
                prot | SHOST_DIF_TYPE1_PROTECTION
                | SHOST_DIF_TYPE2_PROTECTION
                | SHOST_DIF_TYPE3_PROTECTION
                | SHOST_DIX_TYPE1_PROTECTION
                | SHOST_DIX_TYPE2_PROTECTION
                | SHOST_DIX_TYPE3_PROTECTION);

            guard = SHOST_DIX_GUARD_CRC;

            if (IS_PI_IPGUARD_CAPABLE(ha) &&
                (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
                guard |= SHOST_DIX_GUARD_IP;

            scsi_host_set_guard(vha->host, guard);
            vha->flags.difdix_supported = 0;

    if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
        ql_dbg(ql_dbg_user, vha, 0x7083,
            "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
        goto vport_create_failed_2;

    /* initialize attributes */
    fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
    fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
    fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
    fc_host_supported_classes(vha->host) =
        fc_host_supported_classes(base_vha->host);
    fc_host_supported_speeds(vha->host) =
        fc_host_supported_speeds(base_vha->host);

    qlt_vport_create(vha, ha);
    qla24xx_vport_disable(fc_vport, disable);

    if (ha->flags.cpu_affinity_enabled) {
        req = ha->req_q_map[1];
        ql_dbg(ql_dbg_multiq, vha, 0xc000,
            "Request queue %p attached with "
            "VP[%d], cpu affinity =%d\n",
            req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
    } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
        /* Create a request queue in QoS mode for the vport */
        for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
            if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
                && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
                qos = ha->npiv_info[cnt].q_qos;

        ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
            ql_log(ql_log_warn, vha, 0x7084,
                "Can't create request queue for VP[%d]\n",
            ql_dbg(ql_dbg_multiq, vha, 0xc001,
                "Request Que:%d Q0s: %d) created for VP[%d]\n",
                ret, qos, vha->vp_idx);
            ql_dbg(ql_dbg_user, vha, 0x7085,
                "Request Que:%d Q0s: %d) created for VP[%d]\n",
                ret, qos, vha->vp_idx);
            req = ha->req_q_map[ret];

vport_create_failed_2:
    qla24xx_disable_vp(vha);
    qla24xx_deallocate_vp_id(vha);
    scsi_host_put(vha->host);
    return FC_VPORT_FAILED;
qla24xx_vport_delete(struct fc_vport *fc_vport)
    scsi_qla_host_t *vha = fc_vport->dd_data;
    struct qla_hw_data *ha = vha->hw;
    uint16_t id = vha->vp_idx;

    while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
        test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))

    qla24xx_disable_vp(vha);

    vha->flags.delete_progress = 1;

    qlt_remove_target(ha, vha);

    fc_remove_host(vha->host);

    scsi_remove_host(vha->host);

    /* Allow timer to run to drain queued items, when removing vp */
    qla24xx_deallocate_vp_id(vha);

    if (vha->timer_active) {
        qla2x00_vp_stop_timer(vha);
        ql_dbg(ql_dbg_user, vha, 0x7086,
            "Timer for the VP[%d] has stopped\n", vha->vp_idx);

    BUG_ON(atomic_read(&vha->vref_count));

    qla2x00_free_fcports(vha);

    mutex_lock(&ha->vport_lock);
    ha->cur_vport_count--;
    clear_bit(vha->vp_idx, ha->vp_idx_map);
    mutex_unlock(&ha->vport_lock);

    if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
        if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
            ql_log(ql_log_warn, vha, 0x7087,
                "Queue delete failed.\n");

    ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
    scsi_host_put(vha->host);

qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
    scsi_qla_host_t *vha = fc_vport->dd_data;

        qla24xx_disable_vp(vha);
        qla24xx_enable_vp(vha);
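
/*
 * Two fc_function_template instances are exported: qla2xxx_transport_functions
 * for the physical port (including the vport_create/vport_disable/vport_delete
 * hooks) and qla2xxx_transport_vport_functions for NPIV vports, which share
 * the same show/get callbacks but cannot create further vports.
 */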
struct fc_function_template qla2xxx_transport_functions = {
    .show_host_node_name = 1,
    .show_host_port_name = 1,
    .show_host_supported_classes = 1,
    .show_host_supported_speeds = 1,

    .get_host_port_id = qla2x00_get_host_port_id,
    .show_host_port_id = 1,
    .get_host_speed = qla2x00_get_host_speed,
    .show_host_speed = 1,
    .get_host_port_type = qla2x00_get_host_port_type,
    .show_host_port_type = 1,
    .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
    .show_host_symbolic_name = 1,
    .set_host_system_hostname = qla2x00_set_host_system_hostname,
    .show_host_system_hostname = 1,
    .get_host_fabric_name = qla2x00_get_host_fabric_name,
    .show_host_fabric_name = 1,
    .get_host_port_state = qla2x00_get_host_port_state,
    .show_host_port_state = 1,

    .dd_fcrport_size = sizeof(struct fc_port *),
    .show_rport_supported_classes = 1,

    .get_starget_node_name = qla2x00_get_starget_node_name,
    .show_starget_node_name = 1,
    .get_starget_port_name = qla2x00_get_starget_port_name,
    .show_starget_port_name = 1,
    .get_starget_port_id = qla2x00_get_starget_port_id,
    .show_starget_port_id = 1,

    .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
    .show_rport_dev_loss_tmo = 1,

    .issue_fc_host_lip = qla2x00_issue_lip,
    .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
    .terminate_rport_io = qla2x00_terminate_rport_io,
    .get_fc_host_stats = qla2x00_get_fc_host_stats,
    .reset_fc_host_stats = qla2x00_reset_host_stats,

    .vport_create = qla24xx_vport_create,
    .vport_disable = qla24xx_vport_disable,
    .vport_delete = qla24xx_vport_delete,
    .bsg_request = qla24xx_bsg_request,
    .bsg_timeout = qla24xx_bsg_timeout,

struct fc_function_template qla2xxx_transport_vport_functions = {
    .show_host_node_name = 1,
    .show_host_port_name = 1,
    .show_host_supported_classes = 1,

    .get_host_port_id = qla2x00_get_host_port_id,
    .show_host_port_id = 1,
    .get_host_speed = qla2x00_get_host_speed,
    .show_host_speed = 1,
    .get_host_port_type = qla2x00_get_host_port_type,
    .show_host_port_type = 1,
    .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
    .show_host_symbolic_name = 1,
    .set_host_system_hostname = qla2x00_set_host_system_hostname,
    .show_host_system_hostname = 1,
    .get_host_fabric_name = qla2x00_get_host_fabric_name,
    .show_host_fabric_name = 1,
    .get_host_port_state = qla2x00_get_host_port_state,
    .show_host_port_state = 1,

    .dd_fcrport_size = sizeof(struct fc_port *),
    .show_rport_supported_classes = 1,

    .get_starget_node_name = qla2x00_get_starget_node_name,
    .show_starget_node_name = 1,
    .get_starget_port_name = qla2x00_get_starget_port_name,
    .show_starget_port_name = 1,
    .get_starget_port_id = qla2x00_get_starget_port_id,
    .show_starget_port_id = 1,

    .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
    .show_rport_dev_loss_tmo = 1,

    .issue_fc_host_lip = qla2x00_issue_lip,
    .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
    .terminate_rport_io = qla2x00_terminate_rport_io,
    .get_fc_host_stats = qla2x00_get_fc_host_stats,
    .reset_fc_host_stats = qla2x00_reset_host_stats,

    .bsg_request = qla24xx_bsg_request,
    .bsg_timeout = qla24xx_bsg_timeout,
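
/*
 * qla2x00_init_host_attr() seeds the fc_host attributes for a port (this is
 * assumed to run once per host at initialisation); the supported-speeds mask
 * is derived from the adapter family -- e.g. 10Gb for CNA parts and up to
 * 32Gb for ISP27xx -- falling back to 1Gb for the oldest ISPs.
 */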
qla2x00_init_host_attr(scsi_qla_host_t *vha)
    struct qla_hw_data *ha = vha->hw;
    u32 speed = FC_PORTSPEED_UNKNOWN;

    fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
    fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
    fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
    fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
        (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
    fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
    fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

    if (IS_CNA_CAPABLE(ha))
        speed = FC_PORTSPEED_10GBIT;
    else if (IS_QLA2031(ha))
        speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
    else if (IS_QLA25XX(ha))
        speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
            FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
    else if (IS_QLA24XX_TYPE(ha))
        speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
    else if (IS_QLA23XX(ha))
        speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
    else if (IS_QLAFX00(ha))
        speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
            FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
    else if (IS_QLA27XX(ha))
        speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
        speed = FC_PORTSPEED_1GBIT;
    fc_host_supported_speeds(vha->host) = speed;