/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/delay.h>

static int qla24xx_vport_disable(struct fc_vport *, bool);

/* SYSFS attributes --------------------------------------------------------- */
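/*
 * 'fw_dump' attribute handlers: the read side copies the captured firmware
 * dump out of ha->fw_dump; the write side takes a small integer that clears
 * the dump, marks it ready for reading, re-allocates the dump buffer or
 * injects a system error.
 */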
qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (ha->fw_dump_reading == 0)

    return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    ql_dbg(ql_dbg_user, vha, 0x705b,
        "Firmware dump not supported for ISP82xx\n");

    reading = simple_strtol(buf, NULL, 10);

    if (!ha->fw_dump_reading)

    ql_log(ql_log_info, vha, 0x705d,
        "Firmware dump cleared on (%ld).\n", vha->host_no);

    ha->fw_dump_reading = 0;

    if (ha->fw_dumped && !ha->fw_dump_reading) {
        ha->fw_dump_reading = 1;

        ql_log(ql_log_info, vha, 0x705e,
            "Raw firmware dump ready for read on (%ld).\n",

    qla2x00_alloc_fw_dump(vha);

    qla2x00_system_error(vha);

static struct bin_attribute sysfs_fw_dump_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .read = qla2x00_sysfs_read_fw_dump,
    .write = qla2x00_sysfs_write_fw_dump,
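/*
 * 'nvram' attribute handlers: the read side returns the cached NVRAM image
 * (refreshing it from flash on no-cache VPD parts); the write side validates
 * size and privilege, re-checksums the buffer, programs NVRAM and schedules
 * an ISP abort so the new settings take effect.
 */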
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (!capable(CAP_SYS_ADMIN))

    if (IS_NOCACHE_VPD_TYPE(ha))
        ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,

    return memory_read_from_buffer(buf, count, &off, ha->nvram,
qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
        !ha->isp_ops->write_nvram)

    /* Checksum NVRAM. */
    if (IS_FWI2_CAPABLE(ha)) {

        iter = (uint32_t *)buf;

        for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
            chksum += le32_to_cpu(*iter++);
        chksum = ~chksum + 1;
        *iter = cpu_to_le32(chksum);

        iter = (uint8_t *)buf;

        for (cnt = 0; cnt < count - 1; cnt++)

        chksum = ~chksum + 1;

    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x705f,
            "HBA not online, failing NVRAM update.\n");

    ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
    ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,

    ql_dbg(ql_dbg_user, vha, 0x7060,
        "Setting ISP_ABORT_NEEDED\n");
    /* NVRAM settings take effect immediately. */
    set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    qla2xxx_wake_dpc(vha);
    qla2x00_wait_for_chip_reset(vha);

static struct bin_attribute sysfs_nvram_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .read = qla2x00_sysfs_read_nvram,
    .write = qla2x00_sysfs_write_nvram,
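/*
 * 'optrom' attribute handlers: reads and writes go through the staging
 * buffer ha->optrom_buffer; which flash region is staged (and whether it is
 * being read or written) is selected via the 'optrom_ctl' attribute below.
 */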
qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (ha->optrom_state != QLA_SREADING)

    return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
        ha->optrom_region_size);

qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (ha->optrom_state != QLA_SWRITING)

    if (off > ha->optrom_region_size)

    if (off + count > ha->optrom_region_size)
        count = ha->optrom_region_size - off;

    memcpy(&ha->optrom_buffer[off], buf, count);

static struct bin_attribute sysfs_optrom_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .read = qla2x00_sysfs_read_optrom,
    .write = qla2x00_sysfs_write_optrom,
qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    uint32_t size = ha->optrom_size;

    if (unlikely(pci_channel_offline(ha->pdev)))

    if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)

    if (start > ha->optrom_size)

        if (ha->optrom_state != QLA_SREADING &&
            ha->optrom_state != QLA_SWRITING)

        ha->optrom_state = QLA_SWAITING;

        ql_dbg(ql_dbg_user, vha, 0x7061,
            "Freeing flash region allocation -- 0x%x bytes.\n",
            ha->optrom_region_size);

        vfree(ha->optrom_buffer);
        ha->optrom_buffer = NULL;

        if (ha->optrom_state != QLA_SWAITING)

        ha->optrom_region_start = start;
        ha->optrom_region_size = start + size > ha->optrom_size ?
            ha->optrom_size - start : size;

        ha->optrom_state = QLA_SREADING;
        ha->optrom_buffer = vmalloc(ha->optrom_region_size);
        if (ha->optrom_buffer == NULL) {
            ql_log(ql_log_warn, vha, 0x7062,
                "Unable to allocate memory for optrom retrieval "
                "(%x).\n", ha->optrom_region_size);

            ha->optrom_state = QLA_SWAITING;

        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x7063,
                "HBA not online, failing NVRAM update.\n");

        ql_dbg(ql_dbg_user, vha, 0x7064,
            "Reading flash region -- 0x%x/0x%x.\n",
            ha->optrom_region_start, ha->optrom_region_size);

        memset(ha->optrom_buffer, 0, ha->optrom_region_size);
        ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
            ha->optrom_region_start, ha->optrom_region_size);
        if (ha->optrom_state != QLA_SWAITING)

        /*
         * We need to be more restrictive on which FLASH regions are
         * allowed to be updated via user-space. Regions accessible
         * via this method include:
         *
         * ISP21xx/ISP22xx/ISP23xx type boards:
         *   0x000000 -> 0x020000 -- Boot code.
         *
         * ISP2322/ISP24xx type boards:
         *   0x000000 -> 0x07ffff -- Boot code.
         *   0x080000 -> 0x0fffff -- Firmware.
         *
         * ISP25xx type boards:
         *   0x000000 -> 0x07ffff -- Boot code.
         *   0x080000 -> 0x0fffff -- Firmware.
         *   0x120000 -> 0x12ffff -- VPD and HBA parameters.
         */
        if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)

        else if (start == (ha->flt_region_boot * 4) ||
            start == (ha->flt_region_fw * 4))

        else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))

            ql_log(ql_log_warn, vha, 0x7065,
                "Invalid start region 0x%x/0x%x.\n", start, size);

        ha->optrom_region_start = start;
        ha->optrom_region_size = start + size > ha->optrom_size ?
            ha->optrom_size - start : size;

        ha->optrom_state = QLA_SWRITING;
        ha->optrom_buffer = vmalloc(ha->optrom_region_size);
        if (ha->optrom_buffer == NULL) {
            ql_log(ql_log_warn, vha, 0x7066,
                "Unable to allocate memory for optrom update "
                "(%x)\n", ha->optrom_region_size);

            ha->optrom_state = QLA_SWAITING;

        ql_dbg(ql_dbg_user, vha, 0x7067,
            "Staging flash region write -- 0x%x/0x%x.\n",
            ha->optrom_region_start, ha->optrom_region_size);

        memset(ha->optrom_buffer, 0, ha->optrom_region_size);

        if (ha->optrom_state != QLA_SWRITING)

        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x7068,
                "HBA not online, failing flash update.\n");

        ql_dbg(ql_dbg_user, vha, 0x7069,
            "Writing flash region -- 0x%x/0x%x.\n",
            ha->optrom_region_start, ha->optrom_region_size);

        ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
            ha->optrom_region_start, ha->optrom_region_size);

static struct bin_attribute sysfs_optrom_ctl_attr = {
    .name = "optrom_ctl",
    .write = qla2x00_sysfs_write_optrom_ctl,
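/*
 * 'vpd' attribute handlers: the read side returns the cached VPD image; the
 * write side updates VPD through the NVRAM accessors and refreshes the flash
 * version information on FWI2-capable adapters.
 */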
qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev)))

    if (!capable(CAP_SYS_ADMIN))

    if (IS_NOCACHE_VPD_TYPE(ha))
        ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,

    return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);

qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev)))

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
        !ha->isp_ops->write_nvram)

    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x706a,
            "HBA not online, failing VPD update.\n");

    ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
    ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);

    /* Update flash version information for 4Gb & above. */
    if (!IS_FWI2_CAPABLE(ha))

    tmp_data = vmalloc(256);

        ql_log(ql_log_warn, vha, 0x706b,
            "Unable to allocate memory for VPD information update.\n");

    ha->isp_ops->get_flash_version(vha, tmp_data);

static struct bin_attribute sysfs_vpd_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .read = qla2x00_sysfs_read_vpd,
    .write = qla2x00_sysfs_write_vpd,
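/*
 * 'sfp' attribute handler: reads the SFP transceiver contents in
 * SFP_BLOCK_SIZE chunks via qla2x00_read_sfp() into a DMA-able buffer and
 * copies each block out to the caller.
 */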
qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;
    uint16_t iter, addr, offset;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)

    ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,

        ql_log(ql_log_warn, vha, 0x706c,
            "Unable to allocate memory for SFP read-data.\n");

    memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);

    for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
        iter++, offset += SFP_BLOCK_SIZE) {

        /* Skip to next device address. */

        rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
            addr, offset, SFP_BLOCK_SIZE, 0);
        if (rval != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x706d,
                "Unable to read SFP data (%x/%x/%x).\n", rval,

        memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
        buf += SFP_BLOCK_SIZE;

static struct bin_attribute sysfs_sfp_attr = {
    .mode = S_IRUSR | S_IWUSR,
    .size = SFP_DEV_SIZE * 2,
    .read = qla2x00_sysfs_read_sfp,
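/*
 * 'reset' attribute handler: the integer written selects an ISP reset, an
 * MPI (management processor) firmware reset, or an FCoE context reset on
 * ISP82xx adapters.
 */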
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

    type = simple_strtol(buf, NULL, 10);

        ql_log(ql_log_info, vha, 0x706e,
            "Issuing ISP reset.\n");

        scsi_block_requests(vha->host);
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        qla2xxx_wake_dpc(vha);
        qla2x00_wait_for_chip_reset(vha);
        scsi_unblock_requests(vha->host);

        ql_log(ql_log_info, vha, 0x706f,
            "Issuing MPI reset.\n");

        /* Make sure FC side is not in reset */
        qla2x00_wait_for_hba_online(vha);

        /* Issue MPI reset */
        scsi_block_requests(vha->host);
        if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
            ql_log(ql_log_warn, vha, 0x7070,
                "MPI reset failed.\n");
        scsi_unblock_requests(vha->host);

        if (!IS_QLA82XX(ha) || vha != base_vha) {
            ql_log(ql_log_info, vha, 0x7071,
                "FCoE ctx reset not supported.\n");

        ql_log(ql_log_info, vha, 0x7072,
            "Issuing FCoE ctx reset.\n");
        set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        qla2xxx_wake_dpc(vha);
        qla2x00_wait_for_fcoe_ctx_reset(vha);

static struct bin_attribute sysfs_reset_attr = {
    .write = qla2x00_sysfs_write_reset,
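/*
 * 'edc' attribute handler: the written buffer carries a device/address/
 * option/length header followed by payload, which is forwarded to the
 * transceiver with qla2x00_write_sfp().
 */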
qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;
    uint16_t dev, adr, opt, len;

    ha->edc_data_len = 0;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)

    ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,

        ql_log(ql_log_warn, vha, 0x7073,
            "Unable to allocate memory for EDC write.\n");

    dev = le16_to_cpup((void *)&buf[0]);
    adr = le16_to_cpup((void *)&buf[2]);
    opt = le16_to_cpup((void *)&buf[4]);
    len = le16_to_cpup((void *)&buf[6]);

    if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)

    memcpy(ha->edc_data, &buf[8], len);

    rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,

    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7074,
            "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
            rval, dev, adr, opt, len, buf[8]);

static struct bin_attribute sysfs_edc_attr = {
    .write = qla2x00_sysfs_write_edc,
qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;
    uint16_t dev, adr, opt, len;

    ha->edc_data_len = 0;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)

    ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,

        ql_log(ql_log_warn, vha, 0x708c,
            "Unable to allocate memory for EDC status.\n");

    dev = le16_to_cpup((void *)&buf[0]);
    adr = le16_to_cpup((void *)&buf[2]);
    opt = le16_to_cpup((void *)&buf[4]);
    len = le16_to_cpup((void *)&buf[6]);

    if (len == 0 || len > DMA_POOL_SIZE)

    memset(ha->edc_data, 0, len);
    rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,

    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_info, vha, 0x7075,
            "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
            rval, dev, adr, opt, len);

    ha->edc_data_len = len;

qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)

    if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)

    memcpy(buf, ha->edc_data, ha->edc_data_len);

    return ha->edc_data_len;

static struct bin_attribute sysfs_edc_status_attr = {
    .name = "edc_status",
    .mode = S_IRUSR | S_IWUSR,
    .write = qla2x00_sysfs_write_edc_status,
    .read = qla2x00_sysfs_read_edc_status,
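/*
 * 'xgmac_stats' attribute handler: fetches XGMAC statistics from firmware
 * into a coherent DMA buffer and copies up to 'count' bytes to the caller.
 */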
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    uint16_t actual_size;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)

    ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
        &ha->xgmac_data_dma, GFP_KERNEL);
    if (!ha->xgmac_data) {
        ql_log(ql_log_warn, vha, 0x7076,
            "Unable to allocate memory for XGMAC read-data.\n");

    memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);

    rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
        XGMAC_DATA_SIZE, &actual_size);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7077,
            "Unable to read XGMAC data (%x).\n", rval);

    count = actual_size > count ? count : actual_size;
    memcpy(buf, ha->xgmac_data, count);

static struct bin_attribute sysfs_xgmac_stats_attr = {
    .name = "xgmac_stats",
    .read = qla2x00_sysfs_read_xgmac_stats,
qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)

    struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
        struct device, kobj)));
    struct qla_hw_data *ha = vha->hw;

    uint16_t actual_size;

    if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)

    ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
        &ha->dcbx_tlv_dma, GFP_KERNEL);

        ql_log(ql_log_warn, vha, 0x7078,
            "Unable to allocate memory for DCBX TLV read-data.\n");

    memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

    rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,

    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7079,
            "Unable to read DCBX TLV (%x).\n", rval);

    memcpy(buf, ha->dcbx_tlv, count);

static struct bin_attribute sysfs_dcbx_tlv_attr = {
    .read = qla2x00_sysfs_read_dcbx_tlv,
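/*
 * Table tying sysfs attribute names to their bin_attribute definitions; the
 * trailing flag (is4GBp_only) restricts an entry to 4Gb/FWI2-capable (1),
 * ISP25xx (2) or ISP8xxx (3) adapters.
 */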
static struct sysfs_entry {
    struct bin_attribute *attr;
} bin_file_entries[] = {
    { "fw_dump", &sysfs_fw_dump_attr, },
    { "nvram", &sysfs_nvram_attr, },
    { "optrom", &sysfs_optrom_attr, },
    { "optrom_ctl", &sysfs_optrom_ctl_attr, },
    { "vpd", &sysfs_vpd_attr, 1 },
    { "sfp", &sysfs_sfp_attr, 1 },
    { "reset", &sysfs_reset_attr, },
    { "edc", &sysfs_edc_attr, 2 },
    { "edc_status", &sysfs_edc_status_attr, 2 },
    { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
    { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)

    struct Scsi_Host *host = vha->host;
    struct sysfs_entry *iter;

    for (iter = bin_file_entries; iter->name; iter++) {
        if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))

        if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))

        if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))

        ret = sysfs_create_bin_file(&host->shost_gendev.kobj,

            ql_log(ql_log_warn, vha, 0x00f3,
                "Unable to create sysfs %s binary attribute (%d).\n",

            ql_dbg(ql_dbg_init, vha, 0x00f4,
                "Successfully created sysfs %s binary attribute.\n",
qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)

    struct Scsi_Host *host = vha->host;
    struct sysfs_entry *iter;
    struct qla_hw_data *ha = vha->hw;

    for (iter = bin_file_entries; iter->name; iter++) {
        if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))

        if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))

        if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))

        sysfs_remove_bin_file(&host->shost_gendev.kobj,

    if (ha->beacon_blink_led == 1)
        ha->isp_ops->beacon_off(vha);
/* Scsi_Host attributes. */

qla2x00_drvr_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);

qla2x00_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    return snprintf(buf, PAGE_SIZE, "%s\n",
        ha->isp_ops->fw_version_str(vha, fw_str));

qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (IS_FWI2_CAPABLE(ha)) {
        qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
        return snprintf(buf, PAGE_SIZE, "%s\n", buf);

    sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
    return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);

qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
        ha->product_id[0], ha->product_id[1], ha->product_id[2],

qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);

qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    return snprintf(buf, PAGE_SIZE, "%s\n",
        vha->hw->model_desc ? vha->hw->model_desc : "");

qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    return snprintf(buf, PAGE_SIZE, "%s\n",
        vha->hw->isp_ops->pci_info_str(vha, pci_info));
qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
        atomic_read(&vha->loop_state) == LOOP_DEAD ||
        vha->device_flags & DFLG_NO_CABLE)
        len = snprintf(buf, PAGE_SIZE, "Link Down\n");
    else if (atomic_read(&vha->loop_state) != LOOP_READY ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
        len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");

        len = snprintf(buf, PAGE_SIZE, "Link Up - ");

    switch (ha->current_topology) {
        len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");

        len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");

        len += snprintf(buf + len, PAGE_SIZE-len,
            "N_Port to N_Port\n");

        len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");

        len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    switch (vha->hw->zio_mode) {
    case QLA_ZIO_MODE_6:
        len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");

    case QLA_ZIO_DISABLED:
        len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");

qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_ZIO_SUPPORTED(ha))

    if (sscanf(buf, "%d", &val) != 1)

        zio_mode = QLA_ZIO_MODE_6;

        zio_mode = QLA_ZIO_DISABLED;

    /* Update per-hba values and queue a reset. */
    if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
        ha->zio_mode = zio_mode;
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);

qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);

qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (sscanf(buf, "%d", &val) != 1)
    if (val > 25500 || val < 100)

    zio_timer = (uint16_t)(val / 100);
    vha->hw->zio_timer = zio_timer;
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (vha->hw->beacon_blink_led)
        len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");

        len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");

qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (IS_QLA2100(ha) || IS_QLA2200(ha))

    if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
        ql_log(ql_log_warn, vha, 0x707a,
            "Abort ISP active -- ignoring beacon request.\n");

    if (sscanf(buf, "%d", &val) != 1)

        rval = ha->isp_ops->beacon_on(vha);

        rval = ha->isp_ops->beacon_off(vha);

    if (rval != QLA_SUCCESS)
qla2x00_optrom_bios_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
        ha->bios_revision[0]);

qla2x00_optrom_efi_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
        ha->efi_revision[0]);

qla2x00_optrom_fcode_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
        ha->fcode_revision[0]);

qla2x00_optrom_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
        ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
        ha->fw_revision[3]);

qla2x00_optrom_gold_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha))
        return snprintf(buf, PAGE_SIZE, "\n");

    return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
        ha->gold_fw_version[0], ha->gold_fw_version[1],
        ha->gold_fw_version[2], ha->gold_fw_version[3]);
qla2x00_total_isp_aborts_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;
    return snprintf(buf, PAGE_SIZE, "%d\n",
        ha->qla_stats.total_isp_aborts);

qla24xx_84xx_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    int rval = QLA_SUCCESS;
    uint16_t status[2] = {0, 0};
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA84XX(ha))
        return snprintf(buf, PAGE_SIZE, "\n");

    if (ha->cs84xx->op_fw_version == 0)
        rval = qla84xx_verify_chip(vha, status);

    if ((rval == QLA_SUCCESS) && (status[0] == 0))
        return snprintf(buf, PAGE_SIZE, "%u\n",
            (uint32_t)ha->cs84xx->op_fw_version);

    return snprintf(buf, PAGE_SIZE, "\n");
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha))
        return snprintf(buf, PAGE_SIZE, "\n");

    return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
        ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
        ha->mpi_capabilities);

qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha))
        return snprintf(buf, PAGE_SIZE, "\n");

    return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
        ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);

qla2x00_flash_block_size_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    struct qla_hw_data *ha = vha->hw;

    return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);

qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (!IS_QLA8XXX_TYPE(vha->hw))
        return snprintf(buf, PAGE_SIZE, "\n");

    return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
qla2x00_vn_port_mac_address_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    if (!IS_QLA8XXX_TYPE(vha->hw))
        return snprintf(buf, PAGE_SIZE, "\n");

    return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
        vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
        vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
        vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);

qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

    return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);

qla2x00_thermal_temp_show(struct device *dev,
    struct device_attribute *attr, char *buf)

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    int rval = QLA_FUNCTION_FAILED;
    uint16_t temp, frac;

    if (!vha->hw->flags.thermal_supported)
        return snprintf(buf, PAGE_SIZE, "\n");

    if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
        ql_log(ql_log_warn, vha, 0x707b,
            "ISP reset active.\n");
    else if (!vha->hw->flags.eeh_busy)
        rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
    if (rval != QLA_SUCCESS)

    return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,

    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    int rval = QLA_FUNCTION_FAILED;

    if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
        ql_log(ql_log_warn, vha, 0x707c,
            "ISP reset active.\n");
    else if (!vha->hw->flags.eeh_busy)
        rval = qla2x00_get_firmware_state(vha, state);
    if (rval != QLA_SUCCESS)
        memset(state, -1, sizeof(state));

    return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
        state[1], state[2], state[3], state[4]);
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
    qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
    qla2x00_beacon_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
    qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
    qla2x00_optrom_efi_version_show, NULL);
static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
    qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,

static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
    qla2x00_optrom_gold_fw_version_show, NULL);
static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,

static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,

static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,

static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
    qla2x00_vn_port_mac_address_show, NULL);
static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
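/*
 * Scsi_Host device attributes exported by the driver (this array is
 * referenced from the driver's SCSI host template).
 */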
struct device_attribute *qla2x00_host_attrs[] = {
    &dev_attr_driver_version,
    &dev_attr_fw_version,
    &dev_attr_serial_num,

    &dev_attr_model_name,
    &dev_attr_model_desc,

    &dev_attr_link_state,

    &dev_attr_zio_timer,

    &dev_attr_optrom_bios_version,
    &dev_attr_optrom_efi_version,
    &dev_attr_optrom_fcode_version,
    &dev_attr_optrom_fw_version,
    &dev_attr_84xx_fw_version,
    &dev_attr_total_isp_aborts,
    &dev_attr_mpi_version,
    &dev_attr_phy_version,
    &dev_attr_flash_block_size,

    &dev_attr_vn_port_mac_address,
    &dev_attr_fabric_param,

    &dev_attr_optrom_gold_fw_version,
    &dev_attr_thermal_temp,
/* Host attributes. */

qla2x00_get_host_port_id(struct Scsi_Host *shost)

    scsi_qla_host_t *vha = shost_priv(shost);

    fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
        vha->d_id.b.area << 8 | vha->d_id.b.al_pa;

qla2x00_get_host_speed(struct Scsi_Host *shost)

    struct qla_hw_data *ha = ((struct scsi_qla_host *)
        (shost_priv(shost)))->hw;
    u32 speed = FC_PORTSPEED_UNKNOWN;

    switch (ha->link_data_rate) {
    case PORT_SPEED_1GB:
        speed = FC_PORTSPEED_1GBIT;

    case PORT_SPEED_2GB:
        speed = FC_PORTSPEED_2GBIT;

    case PORT_SPEED_4GB:
        speed = FC_PORTSPEED_4GBIT;

    case PORT_SPEED_8GB:
        speed = FC_PORTSPEED_8GBIT;

    case PORT_SPEED_10GB:
        speed = FC_PORTSPEED_10GBIT;

    fc_host_speed(shost) = speed;
qla2x00_get_host_port_type(struct Scsi_Host *shost)

    scsi_qla_host_t *vha = shost_priv(shost);
    uint32_t port_type = FC_PORTTYPE_UNKNOWN;

    fc_host_port_type(shost) = FC_PORTTYPE_NPIV;

    switch (vha->hw->current_topology) {
        port_type = FC_PORTTYPE_LPORT;

        port_type = FC_PORTTYPE_NLPORT;

        port_type = FC_PORTTYPE_PTP;

        port_type = FC_PORTTYPE_NPORT;

    fc_host_port_type(shost) = port_type;
qla2x00_get_starget_node_name(struct scsi_target *starget)

    struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
    scsi_qla_host_t *vha = shost_priv(host);

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->rport &&
            starget->id == fcport->rport->scsi_target_id) {
            node_name = wwn_to_u64(fcport->node_name);

    fc_starget_node_name(starget) = node_name;

qla2x00_get_starget_port_name(struct scsi_target *starget)

    struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
    scsi_qla_host_t *vha = shost_priv(host);

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->rport &&
            starget->id == fcport->rport->scsi_target_id) {
            port_name = wwn_to_u64(fcport->port_name);

    fc_starget_port_name(starget) = port_name;

qla2x00_get_starget_port_id(struct scsi_target *starget)

    struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
    scsi_qla_host_t *vha = shost_priv(host);
    uint32_t port_id = ~0U;

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->rport &&
            starget->id == fcport->rport->scsi_target_id) {
            port_id = fcport->d_id.b.domain << 16 |
                fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;

    fc_starget_port_id(starget) = port_id;
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)

    rport->dev_loss_tmo = timeout;

    rport->dev_loss_tmo = 1;

qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)

    struct Scsi_Host *host = rport_to_shost(rport);
    fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
    unsigned long flags;

    /* Now that the rport has been deleted, set the fcport state to
       FCS_DEVICE_DEAD */
    qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);

    /*
     * Transport has effectively 'deleted' the rport, clear
     * all local references.
     */
    spin_lock_irqsave(host->host_lock, flags);
    fcport->rport = fcport->drport = NULL;
    *((fc_port_t **)rport->dd_data) = NULL;
    spin_unlock_irqrestore(host->host_lock, flags);

    if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))

    if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
        qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
qla2x00_terminate_rport_io(struct fc_rport *rport)

    fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

    if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))

    if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
        qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);

    /*
     * At this point all fcport's software-states are cleared. Perform any
     * final cleanup of firmware resources (PCBs and XCBs).
     */
    if (fcport->loop_id != FC_NO_LOOP_ID &&
        !test_bit(UNLOADING, &fcport->vha->dpc_flags))
        fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
            fcport->loop_id, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa);

qla2x00_issue_lip(struct Scsi_Host *shost)

    scsi_qla_host_t *vha = shost_priv(shost);

    qla2x00_loop_reset(vha);
static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)

    scsi_qla_host_t *vha = shost_priv(shost);
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

    struct link_statistics *stats;
    dma_addr_t stats_dma;
    struct fc_host_statistics *pfc_host_stat;

    pfc_host_stat = &ha->fc_host_stat;
    memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));

    if (test_bit(UNLOADING, &vha->dpc_flags))

    if (unlikely(pci_channel_offline(ha->pdev)))

    stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
    if (stats == NULL) {
        ql_log(ql_log_warn, vha, 0x707d,
            "Failed to allocate memory for stats.\n");

    memset(stats, 0, DMA_POOL_SIZE);

    rval = QLA_FUNCTION_FAILED;
    if (IS_FWI2_CAPABLE(ha)) {
        rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
    } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
        !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
        !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&

        /* Must be in a 'READY' state for statistics retrieval. */
        rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,

    if (rval != QLA_SUCCESS)

    pfc_host_stat->link_failure_count = stats->link_fail_cnt;
    pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
    pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
    pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
    pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
    pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
    if (IS_FWI2_CAPABLE(ha)) {
        pfc_host_stat->lip_count = stats->lip_cnt;
        pfc_host_stat->tx_frames = stats->tx_frames;
        pfc_host_stat->rx_frames = stats->rx_frames;
        pfc_host_stat->dumped_frames = stats->dumped_frames;
        pfc_host_stat->nos_count = stats->nos_rcvd;

    pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
    pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;

    dma_pool_free(ha->s_dma_pool, stats, stats_dma);

    return pfc_host_stat;
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)

    scsi_qla_host_t *vha = shost_priv(shost);

    qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));

qla2x00_set_host_system_hostname(struct Scsi_Host *shost)

    scsi_qla_host_t *vha = shost_priv(shost);

    set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

qla2x00_get_host_fabric_name(struct Scsi_Host *shost)

    scsi_qla_host_t *vha = shost_priv(shost);
    uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
        0xFF, 0xFF, 0xFF, 0xFF};
    u64 fabric_name = wwn_to_u64(node_name);

    if (vha->device_flags & SWITCH_FOUND)
        fabric_name = wwn_to_u64(vha->fabric_node_name);

    fc_host_fabric_name(shost) = fabric_name;

qla2x00_get_host_port_state(struct Scsi_Host *shost)

    scsi_qla_host_t *vha = shost_priv(shost);
    struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

    if (!base_vha->flags.online)
        fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
    else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
        fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;

        fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
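/*
 * NPIV vport management: create/delete/disable callbacks used by the FC
 * transport's vport interface. Creation sanity-checks the request, builds a
 * new scsi_qla_host, inherits attributes from the physical port and, where
 * supported, attaches a dedicated QoS request queue.
 */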
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)

    scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
    scsi_qla_host_t *vha = NULL;
    struct qla_hw_data *ha = base_vha->hw;
    uint16_t options = 0;

    struct req_que *req = ha->req_q_map[0];

    ret = qla24xx_vport_create_req_sanity_check(fc_vport);

        ql_log(ql_log_warn, vha, 0x707e,
            "Vport sanity check failed, status %x\n", ret);

    vha = qla24xx_create_vhost(fc_vport);

        ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
        return FC_VPORT_FAILED;

    atomic_set(&vha->vp_state, VP_OFFLINE);
    fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);

    atomic_set(&vha->vp_state, VP_FAILED);

    /* ready to create vport */
    ql_log(ql_log_info, vha, 0x7080,
        "VP entry id %d assigned.\n", vha->vp_idx);

    /* initialized vport states */
    atomic_set(&vha->loop_state, LOOP_DOWN);
    vha->vp_err_state = VP_ERR_PORTDWN;
    vha->vp_prev_err_state = VP_ERR_UNKWN;
    /* Check if physical ha port is Up */
    if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
        /* Don't retry or attempt login of this virtual port */
        ql_dbg(ql_dbg_user, vha, 0x7081,
            "Vport loop state is not UP.\n");
        atomic_set(&vha->loop_state, LOOP_DEAD);

        fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);

    if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
        if (ha->fw_attributes & BIT_4) {
            vha->flags.difdix_supported = 1;
            ql_dbg(ql_dbg_user, vha, 0x7082,
                "Registered for DIF/DIX type 1 and 3 protection.\n");
            scsi_host_set_prot(vha->host,
                SHOST_DIF_TYPE1_PROTECTION
                | SHOST_DIF_TYPE2_PROTECTION
                | SHOST_DIF_TYPE3_PROTECTION
                | SHOST_DIX_TYPE1_PROTECTION
                | SHOST_DIX_TYPE2_PROTECTION
                | SHOST_DIX_TYPE3_PROTECTION);
            scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);

            vha->flags.difdix_supported = 0;
    if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,

        ql_dbg(ql_dbg_user, vha, 0x7083,
            "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
        goto vport_create_failed_2;

    /* initialize attributes */
    fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
    fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
    fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
    fc_host_supported_classes(vha->host) =
        fc_host_supported_classes(base_vha->host);
    fc_host_supported_speeds(vha->host) =
        fc_host_supported_speeds(base_vha->host);

    qla24xx_vport_disable(fc_vport, disable);

    if (ha->flags.cpu_affinity_enabled) {
        req = ha->req_q_map[1];
        ql_dbg(ql_dbg_multiq, vha, 0xc000,
            "Request queue %p attached with "
            "VP[%d], cpu affinity =%d\n",
            req, vha->vp_idx, ha->flags.cpu_affinity_enabled);

    } else if (ql2xmaxqueues == 1 || !ha->npiv_info)

    /* Create a request queue in QoS mode for the vport */
    for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
        if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
            && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,

            qos = ha->npiv_info[cnt].q_qos;

    ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,

        ql_log(ql_log_warn, vha, 0x7084,
            "Can't create request queue for VP[%d]\n",

        ql_dbg(ql_dbg_multiq, vha, 0xc001,
            "Request Que:%d Q0s: %d) created for VP[%d]\n",
            ret, qos, vha->vp_idx);
        ql_dbg(ql_dbg_user, vha, 0x7085,
            "Request Que:%d Q0s: %d) created for VP[%d]\n",
            ret, qos, vha->vp_idx);
        req = ha->req_q_map[ret];

vport_create_failed_2:
    qla24xx_disable_vp(vha);
    qla24xx_deallocate_vp_id(vha);
    scsi_host_put(vha->host);
    return FC_VPORT_FAILED;
qla24xx_vport_delete(struct fc_vport *fc_vport)

    scsi_qla_host_t *vha = fc_vport->dd_data;
    struct qla_hw_data *ha = vha->hw;
    uint16_t id = vha->vp_idx;

    while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
        test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))

    qla24xx_disable_vp(vha);

    vha->flags.delete_progress = 1;

    fc_remove_host(vha->host);

    scsi_remove_host(vha->host);

    /* Allow timer to run to drain queued items, when removing vp */
    qla24xx_deallocate_vp_id(vha);

    if (vha->timer_active) {
        qla2x00_vp_stop_timer(vha);
        ql_dbg(ql_dbg_user, vha, 0x7086,
            "Timer for the VP[%d] has stopped\n", vha->vp_idx);

    /* No pending activities shall be there on the vha now */
    if (ql2xextended_error_logging & ql_dbg_user)
        msleep(random32()%10);  /* Just to see if something falls on
                                 * the net we have placed below */

    BUG_ON(atomic_read(&vha->vref_count));

    qla2x00_free_fcports(vha);

    mutex_lock(&ha->vport_lock);
    ha->cur_vport_count--;
    clear_bit(vha->vp_idx, ha->vp_idx_map);
    mutex_unlock(&ha->vport_lock);

    if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
        if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
            ql_log(ql_log_warn, vha, 0x7087,
                "Queue delete failed.\n");

    scsi_host_put(vha->host);
    ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)

    scsi_qla_host_t *vha = fc_vport->dd_data;

        qla24xx_disable_vp(vha);

        qla24xx_enable_vp(vha);
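/*
 * FC transport templates: callback tables for the physical port
 * (qla2xxx_transport_functions) and for NPIV vports
 * (qla2xxx_transport_vport_functions).
 */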
struct fc_function_template qla2xxx_transport_functions = {

    .show_host_node_name = 1,
    .show_host_port_name = 1,
    .show_host_supported_classes = 1,
    .show_host_supported_speeds = 1,

    .get_host_port_id = qla2x00_get_host_port_id,
    .show_host_port_id = 1,
    .get_host_speed = qla2x00_get_host_speed,
    .show_host_speed = 1,
    .get_host_port_type = qla2x00_get_host_port_type,
    .show_host_port_type = 1,
    .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
    .show_host_symbolic_name = 1,
    .set_host_system_hostname = qla2x00_set_host_system_hostname,
    .show_host_system_hostname = 1,
    .get_host_fabric_name = qla2x00_get_host_fabric_name,
    .show_host_fabric_name = 1,
    .get_host_port_state = qla2x00_get_host_port_state,
    .show_host_port_state = 1,

    .dd_fcrport_size = sizeof(struct fc_port *),
    .show_rport_supported_classes = 1,

    .get_starget_node_name = qla2x00_get_starget_node_name,
    .show_starget_node_name = 1,
    .get_starget_port_name = qla2x00_get_starget_port_name,
    .show_starget_port_name = 1,
    .get_starget_port_id = qla2x00_get_starget_port_id,
    .show_starget_port_id = 1,

    .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
    .show_rport_dev_loss_tmo = 1,

    .issue_fc_host_lip = qla2x00_issue_lip,
    .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
    .terminate_rport_io = qla2x00_terminate_rport_io,
    .get_fc_host_stats = qla2x00_get_fc_host_stats,

    .vport_create = qla24xx_vport_create,
    .vport_disable = qla24xx_vport_disable,
    .vport_delete = qla24xx_vport_delete,
    .bsg_request = qla24xx_bsg_request,
    .bsg_timeout = qla24xx_bsg_timeout,
struct fc_function_template qla2xxx_transport_vport_functions = {

    .show_host_node_name = 1,
    .show_host_port_name = 1,
    .show_host_supported_classes = 1,

    .get_host_port_id = qla2x00_get_host_port_id,
    .show_host_port_id = 1,
    .get_host_speed = qla2x00_get_host_speed,
    .show_host_speed = 1,
    .get_host_port_type = qla2x00_get_host_port_type,
    .show_host_port_type = 1,
    .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
    .show_host_symbolic_name = 1,
    .set_host_system_hostname = qla2x00_set_host_system_hostname,
    .show_host_system_hostname = 1,
    .get_host_fabric_name = qla2x00_get_host_fabric_name,
    .show_host_fabric_name = 1,
    .get_host_port_state = qla2x00_get_host_port_state,
    .show_host_port_state = 1,

    .dd_fcrport_size = sizeof(struct fc_port *),
    .show_rport_supported_classes = 1,

    .get_starget_node_name = qla2x00_get_starget_node_name,
    .show_starget_node_name = 1,
    .get_starget_port_name = qla2x00_get_starget_port_name,
    .show_starget_port_name = 1,
    .get_starget_port_id = qla2x00_get_starget_port_id,
    .show_starget_port_id = 1,

    .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
    .show_rport_dev_loss_tmo = 1,

    .issue_fc_host_lip = qla2x00_issue_lip,
    .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
    .terminate_rport_io = qla2x00_terminate_rport_io,
    .get_fc_host_stats = qla2x00_get_fc_host_stats,
    .bsg_request = qla24xx_bsg_request,
    .bsg_timeout = qla24xx_bsg_timeout,
qla2x00_init_host_attr(scsi_qla_host_t *vha)

    struct qla_hw_data *ha = vha->hw;
    u32 speed = FC_PORTSPEED_UNKNOWN;

    fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
    fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
    fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
    fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
    fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
    fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

    if (IS_QLA8XXX_TYPE(ha))
        speed = FC_PORTSPEED_10GBIT;
    else if (IS_QLA25XX(ha))
        speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
            FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
    else if (IS_QLA24XX_TYPE(ha))
        speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |

    else if (IS_QLA23XX(ha))
        speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;

        speed = FC_PORTSPEED_1GBIT;
    fc_host_supported_speeds(vha->host) = speed;