treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / scsi / qla2xxx / qla_attr.c
blobd7e7043f9eab283c69ed417da79a28ff5e5f50c0
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
10 #include <linux/kthread.h>
11 #include <linux/vmalloc.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
15 static int qla24xx_vport_disable(struct fc_vport *, bool);
17 /* SYSFS attributes --------------------------------------------------------- */
19 static ssize_t
20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 struct bin_attribute *bin_attr,
22 char *buf, loff_t off, size_t count)
24 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25 struct device, kobj)));
26 struct qla_hw_data *ha = vha->hw;
27 int rval = 0;
29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 return 0;
32 mutex_lock(&ha->optrom_mutex);
33 if (IS_P3P_TYPE(ha)) {
34 if (off < ha->md_template_size) {
35 rval = memory_read_from_buffer(buf, count,
36 &off, ha->md_tmplt_hdr, ha->md_template_size);
37 } else {
38 off -= ha->md_template_size;
39 rval = memory_read_from_buffer(buf, count,
40 &off, ha->md_dump, ha->md_dump_size);
42 } else if (ha->mctp_dumped && ha->mctp_dump_reading) {
43 rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 MCTP_DUMP_SIZE);
45 } else if (ha->fw_dump_reading) {
46 rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
47 ha->fw_dump_len);
48 } else {
49 rval = 0;
51 mutex_unlock(&ha->optrom_mutex);
52 return rval;
55 static ssize_t
56 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
57 struct bin_attribute *bin_attr,
58 char *buf, loff_t off, size_t count)
60 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
61 struct device, kobj)));
62 struct qla_hw_data *ha = vha->hw;
63 int reading;
65 if (off != 0)
66 return (0);
68 reading = simple_strtol(buf, NULL, 10);
69 switch (reading) {
70 case 0:
71 if (!ha->fw_dump_reading)
72 break;
74 ql_log(ql_log_info, vha, 0x705d,
75 "Firmware dump cleared on (%ld).\n", vha->host_no);
77 if (IS_P3P_TYPE(ha)) {
78 qla82xx_md_free(vha);
79 qla82xx_md_prep(vha);
81 ha->fw_dump_reading = 0;
82 ha->fw_dumped = 0;
83 break;
84 case 1:
85 if (ha->fw_dumped && !ha->fw_dump_reading) {
86 ha->fw_dump_reading = 1;
88 ql_log(ql_log_info, vha, 0x705e,
89 "Raw firmware dump ready for read on (%ld).\n",
90 vha->host_no);
92 break;
93 case 2:
94 qla2x00_alloc_fw_dump(vha);
95 break;
96 case 3:
97 if (IS_QLA82XX(ha)) {
98 qla82xx_idc_lock(ha);
99 qla82xx_set_reset_owner(vha);
100 qla82xx_idc_unlock(ha);
101 } else if (IS_QLA8044(ha)) {
102 qla8044_idc_lock(ha);
103 qla82xx_set_reset_owner(vha);
104 qla8044_idc_unlock(ha);
105 } else {
106 ha->fw_dump_mpi = 1;
107 qla2x00_system_error(vha);
109 break;
110 case 4:
111 if (IS_P3P_TYPE(ha)) {
112 if (ha->md_tmplt_hdr)
113 ql_dbg(ql_dbg_user, vha, 0x705b,
114 "MiniDump supported with this firmware.\n");
115 else
116 ql_dbg(ql_dbg_user, vha, 0x709d,
117 "MiniDump not supported with this firmware.\n");
119 break;
120 case 5:
121 if (IS_P3P_TYPE(ha))
122 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
123 break;
124 case 6:
125 if (!ha->mctp_dump_reading)
126 break;
127 ql_log(ql_log_info, vha, 0x70c1,
128 "MCTP dump cleared on (%ld).\n", vha->host_no);
129 ha->mctp_dump_reading = 0;
130 ha->mctp_dumped = 0;
131 break;
132 case 7:
133 if (ha->mctp_dumped && !ha->mctp_dump_reading) {
134 ha->mctp_dump_reading = 1;
135 ql_log(ql_log_info, vha, 0x70c2,
136 "Raw mctp dump ready for read on (%ld).\n",
137 vha->host_no);
139 break;
141 return count;
144 static struct bin_attribute sysfs_fw_dump_attr = {
145 .attr = {
146 .name = "fw_dump",
147 .mode = S_IRUSR | S_IWUSR,
149 .size = 0,
150 .read = qla2x00_sysfs_read_fw_dump,
151 .write = qla2x00_sysfs_write_fw_dump,
154 static ssize_t
155 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
156 struct bin_attribute *bin_attr,
157 char *buf, loff_t off, size_t count)
159 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
160 struct device, kobj)));
161 struct qla_hw_data *ha = vha->hw;
162 uint32_t faddr;
163 struct active_regions active_regions = { };
165 if (!capable(CAP_SYS_ADMIN))
166 return 0;
168 mutex_lock(&ha->optrom_mutex);
169 if (qla2x00_chip_is_down(vha)) {
170 mutex_unlock(&ha->optrom_mutex);
171 return -EAGAIN;
174 if (!IS_NOCACHE_VPD_TYPE(ha)) {
175 mutex_unlock(&ha->optrom_mutex);
176 goto skip;
179 faddr = ha->flt_region_nvram;
180 if (IS_QLA28XX(ha)) {
181 qla28xx_get_aux_images(vha, &active_regions);
182 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
183 faddr = ha->flt_region_nvram_sec;
185 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
187 mutex_unlock(&ha->optrom_mutex);
189 skip:
190 return memory_read_from_buffer(buf, count, &off, ha->nvram,
191 ha->nvram_size);
194 static ssize_t
195 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
196 struct bin_attribute *bin_attr,
197 char *buf, loff_t off, size_t count)
199 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
200 struct device, kobj)));
201 struct qla_hw_data *ha = vha->hw;
202 uint16_t cnt;
204 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
205 !ha->isp_ops->write_nvram)
206 return -EINVAL;
208 /* Checksum NVRAM. */
209 if (IS_FWI2_CAPABLE(ha)) {
210 uint32_t *iter;
211 uint32_t chksum;
213 iter = (uint32_t *)buf;
214 chksum = 0;
215 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
216 chksum += le32_to_cpu(*iter);
217 chksum = ~chksum + 1;
218 *iter = cpu_to_le32(chksum);
219 } else {
220 uint8_t *iter;
221 uint8_t chksum;
223 iter = (uint8_t *)buf;
224 chksum = 0;
225 for (cnt = 0; cnt < count - 1; cnt++)
226 chksum += *iter++;
227 chksum = ~chksum + 1;
228 *iter = chksum;
231 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
232 ql_log(ql_log_warn, vha, 0x705f,
233 "HBA not online, failing NVRAM update.\n");
234 return -EAGAIN;
237 mutex_lock(&ha->optrom_mutex);
238 if (qla2x00_chip_is_down(vha)) {
239 mutex_unlock(&ha->optrom_mutex);
240 return -EAGAIN;
243 /* Write NVRAM. */
244 ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
245 ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
246 count);
247 mutex_unlock(&ha->optrom_mutex);
249 ql_dbg(ql_dbg_user, vha, 0x7060,
250 "Setting ISP_ABORT_NEEDED\n");
251 /* NVRAM settings take effect immediately. */
252 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
253 qla2xxx_wake_dpc(vha);
254 qla2x00_wait_for_chip_reset(vha);
256 return count;
259 static struct bin_attribute sysfs_nvram_attr = {
260 .attr = {
261 .name = "nvram",
262 .mode = S_IRUSR | S_IWUSR,
264 .size = 512,
265 .read = qla2x00_sysfs_read_nvram,
266 .write = qla2x00_sysfs_write_nvram,
269 static ssize_t
270 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
271 struct bin_attribute *bin_attr,
272 char *buf, loff_t off, size_t count)
274 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
275 struct device, kobj)));
276 struct qla_hw_data *ha = vha->hw;
277 ssize_t rval = 0;
279 mutex_lock(&ha->optrom_mutex);
281 if (ha->optrom_state != QLA_SREADING)
282 goto out;
284 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
285 ha->optrom_region_size);
287 out:
288 mutex_unlock(&ha->optrom_mutex);
290 return rval;
293 static ssize_t
294 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
295 struct bin_attribute *bin_attr,
296 char *buf, loff_t off, size_t count)
298 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
299 struct device, kobj)));
300 struct qla_hw_data *ha = vha->hw;
302 mutex_lock(&ha->optrom_mutex);
304 if (ha->optrom_state != QLA_SWRITING) {
305 mutex_unlock(&ha->optrom_mutex);
306 return -EINVAL;
308 if (off > ha->optrom_region_size) {
309 mutex_unlock(&ha->optrom_mutex);
310 return -ERANGE;
312 if (off + count > ha->optrom_region_size)
313 count = ha->optrom_region_size - off;
315 memcpy(&ha->optrom_buffer[off], buf, count);
316 mutex_unlock(&ha->optrom_mutex);
318 return count;
321 static struct bin_attribute sysfs_optrom_attr = {
322 .attr = {
323 .name = "optrom",
324 .mode = S_IRUSR | S_IWUSR,
326 .size = 0,
327 .read = qla2x00_sysfs_read_optrom,
328 .write = qla2x00_sysfs_write_optrom,
331 static ssize_t
332 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
333 struct bin_attribute *bin_attr,
334 char *buf, loff_t off, size_t count)
336 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
337 struct device, kobj)));
338 struct qla_hw_data *ha = vha->hw;
339 uint32_t start = 0;
340 uint32_t size = ha->optrom_size;
341 int val, valid;
342 ssize_t rval = count;
344 if (off)
345 return -EINVAL;
347 if (unlikely(pci_channel_offline(ha->pdev)))
348 return -EAGAIN;
350 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
351 return -EINVAL;
352 if (start > ha->optrom_size)
353 return -EINVAL;
354 if (size > ha->optrom_size - start)
355 size = ha->optrom_size - start;
357 mutex_lock(&ha->optrom_mutex);
358 if (qla2x00_chip_is_down(vha)) {
359 mutex_unlock(&ha->optrom_mutex);
360 return -EAGAIN;
362 switch (val) {
363 case 0:
364 if (ha->optrom_state != QLA_SREADING &&
365 ha->optrom_state != QLA_SWRITING) {
366 rval = -EINVAL;
367 goto out;
369 ha->optrom_state = QLA_SWAITING;
371 ql_dbg(ql_dbg_user, vha, 0x7061,
372 "Freeing flash region allocation -- 0x%x bytes.\n",
373 ha->optrom_region_size);
375 vfree(ha->optrom_buffer);
376 ha->optrom_buffer = NULL;
377 break;
378 case 1:
379 if (ha->optrom_state != QLA_SWAITING) {
380 rval = -EINVAL;
381 goto out;
384 ha->optrom_region_start = start;
385 ha->optrom_region_size = size;
387 ha->optrom_state = QLA_SREADING;
388 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
389 if (ha->optrom_buffer == NULL) {
390 ql_log(ql_log_warn, vha, 0x7062,
391 "Unable to allocate memory for optrom retrieval "
392 "(%x).\n", ha->optrom_region_size);
394 ha->optrom_state = QLA_SWAITING;
395 rval = -ENOMEM;
396 goto out;
399 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
400 ql_log(ql_log_warn, vha, 0x7063,
401 "HBA not online, failing NVRAM update.\n");
402 rval = -EAGAIN;
403 goto out;
406 ql_dbg(ql_dbg_user, vha, 0x7064,
407 "Reading flash region -- 0x%x/0x%x.\n",
408 ha->optrom_region_start, ha->optrom_region_size);
410 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
411 ha->optrom_region_start, ha->optrom_region_size);
412 break;
413 case 2:
414 if (ha->optrom_state != QLA_SWAITING) {
415 rval = -EINVAL;
416 goto out;
420 * We need to be more restrictive on which FLASH regions are
421 * allowed to be updated via user-space. Regions accessible
422 * via this method include:
424 * ISP21xx/ISP22xx/ISP23xx type boards:
426 * 0x000000 -> 0x020000 -- Boot code.
428 * ISP2322/ISP24xx type boards:
430 * 0x000000 -> 0x07ffff -- Boot code.
431 * 0x080000 -> 0x0fffff -- Firmware.
433 * ISP25xx type boards:
435 * 0x000000 -> 0x07ffff -- Boot code.
436 * 0x080000 -> 0x0fffff -- Firmware.
437 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
439 * > ISP25xx type boards:
441 * None -- should go through BSG.
443 valid = 0;
444 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
445 valid = 1;
446 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
447 valid = 1;
448 if (!valid) {
449 ql_log(ql_log_warn, vha, 0x7065,
450 "Invalid start region 0x%x/0x%x.\n", start, size);
451 rval = -EINVAL;
452 goto out;
455 ha->optrom_region_start = start;
456 ha->optrom_region_size = size;
458 ha->optrom_state = QLA_SWRITING;
459 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
460 if (ha->optrom_buffer == NULL) {
461 ql_log(ql_log_warn, vha, 0x7066,
462 "Unable to allocate memory for optrom update "
463 "(%x)\n", ha->optrom_region_size);
465 ha->optrom_state = QLA_SWAITING;
466 rval = -ENOMEM;
467 goto out;
470 ql_dbg(ql_dbg_user, vha, 0x7067,
471 "Staging flash region write -- 0x%x/0x%x.\n",
472 ha->optrom_region_start, ha->optrom_region_size);
474 break;
475 case 3:
476 if (ha->optrom_state != QLA_SWRITING) {
477 rval = -EINVAL;
478 goto out;
481 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
482 ql_log(ql_log_warn, vha, 0x7068,
483 "HBA not online, failing flash update.\n");
484 rval = -EAGAIN;
485 goto out;
488 ql_dbg(ql_dbg_user, vha, 0x7069,
489 "Writing flash region -- 0x%x/0x%x.\n",
490 ha->optrom_region_start, ha->optrom_region_size);
492 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
493 ha->optrom_region_start, ha->optrom_region_size);
494 if (rval)
495 rval = -EIO;
496 break;
497 default:
498 rval = -EINVAL;
501 out:
502 mutex_unlock(&ha->optrom_mutex);
503 return rval;
506 static struct bin_attribute sysfs_optrom_ctl_attr = {
507 .attr = {
508 .name = "optrom_ctl",
509 .mode = S_IWUSR,
511 .size = 0,
512 .write = qla2x00_sysfs_write_optrom_ctl,
515 static ssize_t
516 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
517 struct bin_attribute *bin_attr,
518 char *buf, loff_t off, size_t count)
520 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
521 struct device, kobj)));
522 struct qla_hw_data *ha = vha->hw;
523 uint32_t faddr;
524 struct active_regions active_regions = { };
526 if (unlikely(pci_channel_offline(ha->pdev)))
527 return -EAGAIN;
529 if (!capable(CAP_SYS_ADMIN))
530 return -EINVAL;
532 if (IS_NOCACHE_VPD_TYPE(ha))
533 goto skip;
535 faddr = ha->flt_region_vpd << 2;
537 if (IS_QLA28XX(ha)) {
538 qla28xx_get_aux_images(vha, &active_regions);
539 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
540 faddr = ha->flt_region_vpd_sec << 2;
542 ql_dbg(ql_dbg_init, vha, 0x7070,
543 "Loading %s nvram image.\n",
544 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
545 "primary" : "secondary");
548 mutex_lock(&ha->optrom_mutex);
549 if (qla2x00_chip_is_down(vha)) {
550 mutex_unlock(&ha->optrom_mutex);
551 return -EAGAIN;
554 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
555 mutex_unlock(&ha->optrom_mutex);
557 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
558 skip:
559 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
562 static ssize_t
563 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
564 struct bin_attribute *bin_attr,
565 char *buf, loff_t off, size_t count)
567 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
568 struct device, kobj)));
569 struct qla_hw_data *ha = vha->hw;
570 uint8_t *tmp_data;
572 if (unlikely(pci_channel_offline(ha->pdev)))
573 return 0;
575 if (qla2x00_chip_is_down(vha))
576 return 0;
578 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
579 !ha->isp_ops->write_nvram)
580 return 0;
582 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
583 ql_log(ql_log_warn, vha, 0x706a,
584 "HBA not online, failing VPD update.\n");
585 return -EAGAIN;
588 mutex_lock(&ha->optrom_mutex);
589 if (qla2x00_chip_is_down(vha)) {
590 mutex_unlock(&ha->optrom_mutex);
591 return -EAGAIN;
594 /* Write NVRAM. */
595 ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
596 ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
598 /* Update flash version information for 4Gb & above. */
599 if (!IS_FWI2_CAPABLE(ha)) {
600 mutex_unlock(&ha->optrom_mutex);
601 return -EINVAL;
604 tmp_data = vmalloc(256);
605 if (!tmp_data) {
606 mutex_unlock(&ha->optrom_mutex);
607 ql_log(ql_log_warn, vha, 0x706b,
608 "Unable to allocate memory for VPD information update.\n");
609 return -ENOMEM;
611 ha->isp_ops->get_flash_version(vha, tmp_data);
612 vfree(tmp_data);
614 mutex_unlock(&ha->optrom_mutex);
616 return count;
619 static struct bin_attribute sysfs_vpd_attr = {
620 .attr = {
621 .name = "vpd",
622 .mode = S_IRUSR | S_IWUSR,
624 .size = 0,
625 .read = qla2x00_sysfs_read_vpd,
626 .write = qla2x00_sysfs_write_vpd,
629 static ssize_t
630 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
631 struct bin_attribute *bin_attr,
632 char *buf, loff_t off, size_t count)
634 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
635 struct device, kobj)));
636 int rval;
638 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
639 return 0;
641 mutex_lock(&vha->hw->optrom_mutex);
642 if (qla2x00_chip_is_down(vha)) {
643 mutex_unlock(&vha->hw->optrom_mutex);
644 return 0;
647 rval = qla2x00_read_sfp_dev(vha, buf, count);
648 mutex_unlock(&vha->hw->optrom_mutex);
650 if (rval)
651 return -EIO;
653 return count;
656 static struct bin_attribute sysfs_sfp_attr = {
657 .attr = {
658 .name = "sfp",
659 .mode = S_IRUSR | S_IWUSR,
661 .size = SFP_DEV_SIZE,
662 .read = qla2x00_sysfs_read_sfp,
665 static ssize_t
666 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
667 struct bin_attribute *bin_attr,
668 char *buf, loff_t off, size_t count)
670 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
671 struct device, kobj)));
672 struct qla_hw_data *ha = vha->hw;
673 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
674 int type;
675 uint32_t idc_control;
676 uint8_t *tmp_data = NULL;
678 if (off != 0)
679 return -EINVAL;
681 type = simple_strtol(buf, NULL, 10);
682 switch (type) {
683 case 0x2025c:
684 ql_log(ql_log_info, vha, 0x706e,
685 "Issuing ISP reset.\n");
687 scsi_block_requests(vha->host);
688 if (IS_QLA82XX(ha)) {
689 ha->flags.isp82xx_no_md_cap = 1;
690 qla82xx_idc_lock(ha);
691 qla82xx_set_reset_owner(vha);
692 qla82xx_idc_unlock(ha);
693 } else if (IS_QLA8044(ha)) {
694 qla8044_idc_lock(ha);
695 idc_control = qla8044_rd_reg(ha,
696 QLA8044_IDC_DRV_CTRL);
697 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
698 (idc_control | GRACEFUL_RESET_BIT1));
699 qla82xx_set_reset_owner(vha);
700 qla8044_idc_unlock(ha);
701 } else {
702 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
703 qla2xxx_wake_dpc(vha);
705 qla2x00_wait_for_chip_reset(vha);
706 scsi_unblock_requests(vha->host);
707 break;
708 case 0x2025d:
709 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
710 return -EPERM;
712 ql_log(ql_log_info, vha, 0x706f,
713 "Issuing MPI reset.\n");
715 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
716 uint32_t idc_control;
718 qla83xx_idc_lock(vha, 0);
719 __qla83xx_get_idc_control(vha, &idc_control);
720 idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
721 __qla83xx_set_idc_control(vha, idc_control);
722 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
723 QLA8XXX_DEV_NEED_RESET);
724 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
725 qla83xx_idc_unlock(vha, 0);
726 break;
727 } else {
728 /* Make sure FC side is not in reset */
729 WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
730 QLA_SUCCESS);
732 /* Issue MPI reset */
733 scsi_block_requests(vha->host);
734 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
735 ql_log(ql_log_warn, vha, 0x7070,
736 "MPI reset failed.\n");
737 scsi_unblock_requests(vha->host);
738 break;
740 case 0x2025e:
741 if (!IS_P3P_TYPE(ha) || vha != base_vha) {
742 ql_log(ql_log_info, vha, 0x7071,
743 "FCoE ctx reset not supported.\n");
744 return -EPERM;
747 ql_log(ql_log_info, vha, 0x7072,
748 "Issuing FCoE ctx reset.\n");
749 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
750 qla2xxx_wake_dpc(vha);
751 qla2x00_wait_for_fcoe_ctx_reset(vha);
752 break;
753 case 0x2025f:
754 if (!IS_QLA8031(ha))
755 return -EPERM;
756 ql_log(ql_log_info, vha, 0x70bc,
757 "Disabling Reset by IDC control\n");
758 qla83xx_idc_lock(vha, 0);
759 __qla83xx_get_idc_control(vha, &idc_control);
760 idc_control |= QLA83XX_IDC_RESET_DISABLED;
761 __qla83xx_set_idc_control(vha, idc_control);
762 qla83xx_idc_unlock(vha, 0);
763 break;
764 case 0x20260:
765 if (!IS_QLA8031(ha))
766 return -EPERM;
767 ql_log(ql_log_info, vha, 0x70bd,
768 "Enabling Reset by IDC control\n");
769 qla83xx_idc_lock(vha, 0);
770 __qla83xx_get_idc_control(vha, &idc_control);
771 idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
772 __qla83xx_set_idc_control(vha, idc_control);
773 qla83xx_idc_unlock(vha, 0);
774 break;
775 case 0x20261:
776 ql_dbg(ql_dbg_user, vha, 0x70e0,
777 "Updating cache versions without reset ");
779 tmp_data = vmalloc(256);
780 if (!tmp_data) {
781 ql_log(ql_log_warn, vha, 0x70e1,
782 "Unable to allocate memory for VPD information update.\n");
783 return -ENOMEM;
785 ha->isp_ops->get_flash_version(vha, tmp_data);
786 vfree(tmp_data);
787 break;
789 return count;
792 static struct bin_attribute sysfs_reset_attr = {
793 .attr = {
794 .name = "reset",
795 .mode = S_IWUSR,
797 .size = 0,
798 .write = qla2x00_sysfs_write_reset,
801 static ssize_t
802 qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
803 struct bin_attribute *bin_attr,
804 char *buf, loff_t off, size_t count)
806 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
807 struct device, kobj)));
808 int type;
809 port_id_t did;
811 if (!capable(CAP_SYS_ADMIN))
812 return 0;
814 if (unlikely(pci_channel_offline(vha->hw->pdev)))
815 return 0;
817 if (qla2x00_chip_is_down(vha))
818 return 0;
820 type = simple_strtol(buf, NULL, 10);
822 did.b.domain = (type & 0x00ff0000) >> 16;
823 did.b.area = (type & 0x0000ff00) >> 8;
824 did.b.al_pa = (type & 0x000000ff);
826 ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
827 did.b.domain, did.b.area, did.b.al_pa);
829 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
831 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
832 return count;
835 static struct bin_attribute sysfs_issue_logo_attr = {
836 .attr = {
837 .name = "issue_logo",
838 .mode = S_IWUSR,
840 .size = 0,
841 .write = qla2x00_issue_logo,
844 static ssize_t
845 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
846 struct bin_attribute *bin_attr,
847 char *buf, loff_t off, size_t count)
849 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
850 struct device, kobj)));
851 struct qla_hw_data *ha = vha->hw;
852 int rval;
853 uint16_t actual_size;
855 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
856 return 0;
858 if (unlikely(pci_channel_offline(ha->pdev)))
859 return 0;
860 mutex_lock(&vha->hw->optrom_mutex);
861 if (qla2x00_chip_is_down(vha)) {
862 mutex_unlock(&vha->hw->optrom_mutex);
863 return 0;
866 if (ha->xgmac_data)
867 goto do_read;
869 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
870 &ha->xgmac_data_dma, GFP_KERNEL);
871 if (!ha->xgmac_data) {
872 mutex_unlock(&vha->hw->optrom_mutex);
873 ql_log(ql_log_warn, vha, 0x7076,
874 "Unable to allocate memory for XGMAC read-data.\n");
875 return 0;
878 do_read:
879 actual_size = 0;
880 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
882 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
883 XGMAC_DATA_SIZE, &actual_size);
885 mutex_unlock(&vha->hw->optrom_mutex);
886 if (rval != QLA_SUCCESS) {
887 ql_log(ql_log_warn, vha, 0x7077,
888 "Unable to read XGMAC data (%x).\n", rval);
889 count = 0;
892 count = actual_size > count ? count : actual_size;
893 memcpy(buf, ha->xgmac_data, count);
895 return count;
898 static struct bin_attribute sysfs_xgmac_stats_attr = {
899 .attr = {
900 .name = "xgmac_stats",
901 .mode = S_IRUSR,
903 .size = 0,
904 .read = qla2x00_sysfs_read_xgmac_stats,
907 static ssize_t
908 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
909 struct bin_attribute *bin_attr,
910 char *buf, loff_t off, size_t count)
912 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
913 struct device, kobj)));
914 struct qla_hw_data *ha = vha->hw;
915 int rval;
917 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
918 return 0;
920 if (ha->dcbx_tlv)
921 goto do_read;
922 mutex_lock(&vha->hw->optrom_mutex);
923 if (qla2x00_chip_is_down(vha)) {
924 mutex_unlock(&vha->hw->optrom_mutex);
925 return 0;
928 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
929 &ha->dcbx_tlv_dma, GFP_KERNEL);
930 if (!ha->dcbx_tlv) {
931 mutex_unlock(&vha->hw->optrom_mutex);
932 ql_log(ql_log_warn, vha, 0x7078,
933 "Unable to allocate memory for DCBX TLV read-data.\n");
934 return -ENOMEM;
937 do_read:
938 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
940 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
941 DCBX_TLV_DATA_SIZE);
943 mutex_unlock(&vha->hw->optrom_mutex);
945 if (rval != QLA_SUCCESS) {
946 ql_log(ql_log_warn, vha, 0x7079,
947 "Unable to read DCBX TLV (%x).\n", rval);
948 return -EIO;
951 memcpy(buf, ha->dcbx_tlv, count);
953 return count;
956 static struct bin_attribute sysfs_dcbx_tlv_attr = {
957 .attr = {
958 .name = "dcbx_tlv",
959 .mode = S_IRUSR,
961 .size = 0,
962 .read = qla2x00_sysfs_read_dcbx_tlv,
965 static struct sysfs_entry {
966 char *name;
967 struct bin_attribute *attr;
968 int type;
969 } bin_file_entries[] = {
970 { "fw_dump", &sysfs_fw_dump_attr, },
971 { "nvram", &sysfs_nvram_attr, },
972 { "optrom", &sysfs_optrom_attr, },
973 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
974 { "vpd", &sysfs_vpd_attr, 1 },
975 { "sfp", &sysfs_sfp_attr, 1 },
976 { "reset", &sysfs_reset_attr, },
977 { "issue_logo", &sysfs_issue_logo_attr, },
978 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
979 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
980 { NULL },
983 void
984 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
986 struct Scsi_Host *host = vha->host;
987 struct sysfs_entry *iter;
988 int ret;
990 for (iter = bin_file_entries; iter->name; iter++) {
991 if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
992 continue;
993 if (iter->type == 2 && !IS_QLA25XX(vha->hw))
994 continue;
995 if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
996 continue;
998 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
999 iter->attr);
1000 if (ret)
1001 ql_log(ql_log_warn, vha, 0x00f3,
1002 "Unable to create sysfs %s binary attribute (%d).\n",
1003 iter->name, ret);
1004 else
1005 ql_dbg(ql_dbg_init, vha, 0x00f4,
1006 "Successfully created sysfs %s binary attribute.\n",
1007 iter->name);
1011 void
1012 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
1014 struct Scsi_Host *host = vha->host;
1015 struct sysfs_entry *iter;
1016 struct qla_hw_data *ha = vha->hw;
1018 for (iter = bin_file_entries; iter->name; iter++) {
1019 if (iter->type && !IS_FWI2_CAPABLE(ha))
1020 continue;
1021 if (iter->type == 2 && !IS_QLA25XX(ha))
1022 continue;
1023 if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
1024 continue;
1025 if (iter->type == 0x27 &&
1026 (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
1027 continue;
1029 sysfs_remove_bin_file(&host->shost_gendev.kobj,
1030 iter->attr);
1033 if (stop_beacon && ha->beacon_blink_led == 1)
1034 ha->isp_ops->beacon_off(vha);
1037 /* Scsi_Host attributes. */
1039 static ssize_t
1040 qla2x00_driver_version_show(struct device *dev,
1041 struct device_attribute *attr, char *buf)
1043 return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
1046 static ssize_t
1047 qla2x00_fw_version_show(struct device *dev,
1048 struct device_attribute *attr, char *buf)
1050 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1051 struct qla_hw_data *ha = vha->hw;
1052 char fw_str[128];
1054 return scnprintf(buf, PAGE_SIZE, "%s\n",
1055 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1058 static ssize_t
1059 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1060 char *buf)
1062 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1063 struct qla_hw_data *ha = vha->hw;
1064 uint32_t sn;
1066 if (IS_QLAFX00(vha->hw)) {
1067 return scnprintf(buf, PAGE_SIZE, "%s\n",
1068 vha->hw->mr.serial_num);
1069 } else if (IS_FWI2_CAPABLE(ha)) {
1070 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1071 return strlen(strcat(buf, "\n"));
1074 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1075 return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1076 sn % 100000);
1079 static ssize_t
1080 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1081 char *buf)
1083 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1085 return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1088 static ssize_t
1089 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1090 char *buf)
1092 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1093 struct qla_hw_data *ha = vha->hw;
1095 if (IS_QLAFX00(vha->hw))
1096 return scnprintf(buf, PAGE_SIZE, "%s\n",
1097 vha->hw->mr.hw_version);
1099 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1100 ha->product_id[0], ha->product_id[1], ha->product_id[2],
1101 ha->product_id[3]);
1104 static ssize_t
1105 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1106 char *buf)
1108 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1110 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1113 static ssize_t
1114 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1115 char *buf)
1117 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1119 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1122 static ssize_t
1123 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1124 char *buf)
1126 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1127 char pci_info[30];
1129 return scnprintf(buf, PAGE_SIZE, "%s\n",
1130 vha->hw->isp_ops->pci_info_str(vha, pci_info,
1131 sizeof(pci_info)));
1134 static ssize_t
1135 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1136 char *buf)
1138 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1139 struct qla_hw_data *ha = vha->hw;
1140 int len = 0;
1142 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1143 atomic_read(&vha->loop_state) == LOOP_DEAD ||
1144 vha->device_flags & DFLG_NO_CABLE)
1145 len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1146 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1147 qla2x00_chip_is_down(vha))
1148 len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1149 else {
1150 len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1152 switch (ha->current_topology) {
1153 case ISP_CFG_NL:
1154 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1155 break;
1156 case ISP_CFG_FL:
1157 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1158 break;
1159 case ISP_CFG_N:
1160 len += scnprintf(buf + len, PAGE_SIZE-len,
1161 "N_Port to N_Port\n");
1162 break;
1163 case ISP_CFG_F:
1164 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1165 break;
1166 default:
1167 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1168 break;
1171 return len;
1174 static ssize_t
1175 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1176 char *buf)
1178 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1179 int len = 0;
1181 switch (vha->hw->zio_mode) {
1182 case QLA_ZIO_MODE_6:
1183 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1184 break;
1185 case QLA_ZIO_DISABLED:
1186 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1187 break;
1189 return len;
1192 static ssize_t
1193 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1194 const char *buf, size_t count)
1196 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1197 struct qla_hw_data *ha = vha->hw;
1198 int val = 0;
1199 uint16_t zio_mode;
1201 if (!IS_ZIO_SUPPORTED(ha))
1202 return -ENOTSUPP;
1204 if (sscanf(buf, "%d", &val) != 1)
1205 return -EINVAL;
1207 if (val)
1208 zio_mode = QLA_ZIO_MODE_6;
1209 else
1210 zio_mode = QLA_ZIO_DISABLED;
1212 /* Update per-hba values and queue a reset. */
1213 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1214 ha->zio_mode = zio_mode;
1215 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1217 return strlen(buf);
1220 static ssize_t
1221 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1222 char *buf)
1224 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1226 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1229 static ssize_t
1230 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1231 const char *buf, size_t count)
1233 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1234 int val = 0;
1235 uint16_t zio_timer;
1237 if (sscanf(buf, "%d", &val) != 1)
1238 return -EINVAL;
1239 if (val > 25500 || val < 100)
1240 return -ERANGE;
1242 zio_timer = (uint16_t)(val / 100);
1243 vha->hw->zio_timer = zio_timer;
1245 return strlen(buf);
1248 static ssize_t
1249 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1250 char *buf)
1252 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1254 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1255 vha->hw->last_zio_threshold);
1258 static ssize_t
1259 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1260 const char *buf, size_t count)
1262 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1263 int val = 0;
1265 if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1266 return -EINVAL;
1267 if (sscanf(buf, "%d", &val) != 1)
1268 return -EINVAL;
1269 if (val < 0 || val > 256)
1270 return -ERANGE;
1272 atomic_set(&vha->hw->zio_threshold, val);
1273 return strlen(buf);
1276 static ssize_t
1277 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1278 char *buf)
1280 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1281 int len = 0;
1283 if (vha->hw->beacon_blink_led)
1284 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1285 else
1286 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1287 return len;
1290 static ssize_t
1291 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1292 const char *buf, size_t count)
1294 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1295 struct qla_hw_data *ha = vha->hw;
1296 int val = 0;
1297 int rval;
1299 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1300 return -EPERM;
1302 if (sscanf(buf, "%d", &val) != 1)
1303 return -EINVAL;
1305 mutex_lock(&vha->hw->optrom_mutex);
1306 if (qla2x00_chip_is_down(vha)) {
1307 mutex_unlock(&vha->hw->optrom_mutex);
1308 ql_log(ql_log_warn, vha, 0x707a,
1309 "Abort ISP active -- ignoring beacon request.\n");
1310 return -EBUSY;
1313 if (val)
1314 rval = ha->isp_ops->beacon_on(vha);
1315 else
1316 rval = ha->isp_ops->beacon_off(vha);
1318 if (rval != QLA_SUCCESS)
1319 count = 0;
1321 mutex_unlock(&vha->hw->optrom_mutex);
1323 return count;
1326 static ssize_t
1327 qla2x00_optrom_bios_version_show(struct device *dev,
1328 struct device_attribute *attr, char *buf)
1330 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1331 struct qla_hw_data *ha = vha->hw;
1333 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1334 ha->bios_revision[0]);
1337 static ssize_t
1338 qla2x00_optrom_efi_version_show(struct device *dev,
1339 struct device_attribute *attr, char *buf)
1341 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1342 struct qla_hw_data *ha = vha->hw;
1344 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1345 ha->efi_revision[0]);
1348 static ssize_t
1349 qla2x00_optrom_fcode_version_show(struct device *dev,
1350 struct device_attribute *attr, char *buf)
1352 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1353 struct qla_hw_data *ha = vha->hw;
1355 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1356 ha->fcode_revision[0]);
1359 static ssize_t
1360 qla2x00_optrom_fw_version_show(struct device *dev,
1361 struct device_attribute *attr, char *buf)
1363 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1364 struct qla_hw_data *ha = vha->hw;
1366 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1367 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1368 ha->fw_revision[3]);
1371 static ssize_t
1372 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1373 struct device_attribute *attr, char *buf)
1375 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1376 struct qla_hw_data *ha = vha->hw;
1378 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1379 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1380 return scnprintf(buf, PAGE_SIZE, "\n");
1382 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1383 ha->gold_fw_version[0], ha->gold_fw_version[1],
1384 ha->gold_fw_version[2], ha->gold_fw_version[3]);
1387 static ssize_t
1388 qla2x00_total_isp_aborts_show(struct device *dev,
1389 struct device_attribute *attr, char *buf)
1391 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1393 return scnprintf(buf, PAGE_SIZE, "%d\n",
1394 vha->qla_stats.total_isp_aborts);
1397 static ssize_t
1398 qla24xx_84xx_fw_version_show(struct device *dev,
1399 struct device_attribute *attr, char *buf)
1401 int rval = QLA_SUCCESS;
1402 uint16_t status[2] = { 0 };
1403 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1404 struct qla_hw_data *ha = vha->hw;
1406 if (!IS_QLA84XX(ha))
1407 return scnprintf(buf, PAGE_SIZE, "\n");
1409 if (!ha->cs84xx->op_fw_version) {
1410 rval = qla84xx_verify_chip(vha, status);
1412 if (!rval && !status[0])
1413 return scnprintf(buf, PAGE_SIZE, "%u\n",
1414 (uint32_t)ha->cs84xx->op_fw_version);
1417 return scnprintf(buf, PAGE_SIZE, "\n");
1420 static ssize_t
1421 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
1422 char *buf)
1424 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1425 struct qla_hw_data *ha = vha->hw;
1427 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1428 return scnprintf(buf, PAGE_SIZE, "\n");
1430 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1431 ha->serdes_version[0], ha->serdes_version[1],
1432 ha->serdes_version[2]);
1435 static ssize_t
1436 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1437 char *buf)
1439 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1440 struct qla_hw_data *ha = vha->hw;
1442 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1443 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1444 return scnprintf(buf, PAGE_SIZE, "\n");
1446 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1447 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1448 ha->mpi_capabilities);
1451 static ssize_t
1452 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1453 char *buf)
1455 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1456 struct qla_hw_data *ha = vha->hw;
1458 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1459 return scnprintf(buf, PAGE_SIZE, "\n");
1461 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1462 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1465 static ssize_t
1466 qla2x00_flash_block_size_show(struct device *dev,
1467 struct device_attribute *attr, char *buf)
1469 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1470 struct qla_hw_data *ha = vha->hw;
1472 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1475 static ssize_t
1476 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1477 char *buf)
1479 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1481 if (!IS_CNA_CAPABLE(vha->hw))
1482 return scnprintf(buf, PAGE_SIZE, "\n");
1484 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1487 static ssize_t
1488 qla2x00_vn_port_mac_address_show(struct device *dev,
1489 struct device_attribute *attr, char *buf)
1491 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1493 if (!IS_CNA_CAPABLE(vha->hw))
1494 return scnprintf(buf, PAGE_SIZE, "\n");
1496 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1499 static ssize_t
1500 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1501 char *buf)
1503 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1505 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1508 static ssize_t
1509 qla2x00_thermal_temp_show(struct device *dev,
1510 struct device_attribute *attr, char *buf)
1512 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1513 uint16_t temp = 0;
1514 int rc;
1516 mutex_lock(&vha->hw->optrom_mutex);
1517 if (qla2x00_chip_is_down(vha)) {
1518 mutex_unlock(&vha->hw->optrom_mutex);
1519 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1520 goto done;
1523 if (vha->hw->flags.eeh_busy) {
1524 mutex_unlock(&vha->hw->optrom_mutex);
1525 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1526 goto done;
1529 rc = qla2x00_get_thermal_temp(vha, &temp);
1530 mutex_unlock(&vha->hw->optrom_mutex);
1531 if (rc == QLA_SUCCESS)
1532 return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1534 done:
1535 return scnprintf(buf, PAGE_SIZE, "\n");
1538 static ssize_t
1539 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1540 char *buf)
1542 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1543 int rval = QLA_FUNCTION_FAILED;
1544 uint16_t state[6];
1545 uint32_t pstate;
1547 if (IS_QLAFX00(vha->hw)) {
1548 pstate = qlafx00_fw_state_show(dev, attr, buf);
1549 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1552 mutex_lock(&vha->hw->optrom_mutex);
1553 if (qla2x00_chip_is_down(vha)) {
1554 mutex_unlock(&vha->hw->optrom_mutex);
1555 ql_log(ql_log_warn, vha, 0x707c,
1556 "ISP reset active.\n");
1557 goto out;
1558 } else if (vha->hw->flags.eeh_busy) {
1559 mutex_unlock(&vha->hw->optrom_mutex);
1560 goto out;
1563 rval = qla2x00_get_firmware_state(vha, state);
1564 mutex_unlock(&vha->hw->optrom_mutex);
1565 out:
1566 if (rval != QLA_SUCCESS) {
1567 memset(state, -1, sizeof(state));
1568 rval = qla2x00_get_firmware_state(vha, state);
1571 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1572 state[0], state[1], state[2], state[3], state[4], state[5]);
1575 static ssize_t
1576 qla2x00_diag_requests_show(struct device *dev,
1577 struct device_attribute *attr, char *buf)
1579 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1581 if (!IS_BIDI_CAPABLE(vha->hw))
1582 return scnprintf(buf, PAGE_SIZE, "\n");
1584 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1587 static ssize_t
1588 qla2x00_diag_megabytes_show(struct device *dev,
1589 struct device_attribute *attr, char *buf)
1591 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1593 if (!IS_BIDI_CAPABLE(vha->hw))
1594 return scnprintf(buf, PAGE_SIZE, "\n");
1596 return scnprintf(buf, PAGE_SIZE, "%llu\n",
1597 vha->bidi_stats.transfer_bytes >> 20);
1600 static ssize_t
1601 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1602 char *buf)
1604 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1605 struct qla_hw_data *ha = vha->hw;
1606 uint32_t size;
1608 if (!ha->fw_dumped)
1609 size = 0;
1610 else if (IS_P3P_TYPE(ha))
1611 size = ha->md_template_size + ha->md_dump_size;
1612 else
1613 size = ha->fw_dump_len;
1615 return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1618 static ssize_t
1619 qla2x00_allow_cna_fw_dump_show(struct device *dev,
1620 struct device_attribute *attr, char *buf)
1622 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1624 if (!IS_P3P_TYPE(vha->hw))
1625 return scnprintf(buf, PAGE_SIZE, "\n");
1626 else
1627 return scnprintf(buf, PAGE_SIZE, "%s\n",
1628 vha->hw->allow_cna_fw_dump ? "true" : "false");
1631 static ssize_t
1632 qla2x00_allow_cna_fw_dump_store(struct device *dev,
1633 struct device_attribute *attr, const char *buf, size_t count)
1635 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1636 int val = 0;
1638 if (!IS_P3P_TYPE(vha->hw))
1639 return -EINVAL;
1641 if (sscanf(buf, "%d", &val) != 1)
1642 return -EINVAL;
1644 vha->hw->allow_cna_fw_dump = val != 0;
1646 return strlen(buf);
1649 static ssize_t
1650 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1651 char *buf)
1653 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1654 struct qla_hw_data *ha = vha->hw;
1656 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1657 return scnprintf(buf, PAGE_SIZE, "\n");
1659 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1660 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
1663 static ssize_t
1664 qla2x00_min_supported_speed_show(struct device *dev,
1665 struct device_attribute *attr, char *buf)
1667 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1668 struct qla_hw_data *ha = vha->hw;
1670 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1671 return scnprintf(buf, PAGE_SIZE, "\n");
1673 return scnprintf(buf, PAGE_SIZE, "%s\n",
1674 ha->min_supported_speed == 6 ? "64Gps" :
1675 ha->min_supported_speed == 5 ? "32Gps" :
1676 ha->min_supported_speed == 4 ? "16Gps" :
1677 ha->min_supported_speed == 3 ? "8Gps" :
1678 ha->min_supported_speed == 2 ? "4Gps" :
1679 ha->min_supported_speed != 0 ? "unknown" : "");
1682 static ssize_t
1683 qla2x00_max_supported_speed_show(struct device *dev,
1684 struct device_attribute *attr, char *buf)
1686 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1687 struct qla_hw_data *ha = vha->hw;
1689 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1690 return scnprintf(buf, PAGE_SIZE, "\n");
1692 return scnprintf(buf, PAGE_SIZE, "%s\n",
1693 ha->max_supported_speed == 2 ? "64Gps" :
1694 ha->max_supported_speed == 1 ? "32Gps" :
1695 ha->max_supported_speed == 0 ? "16Gps" : "unknown");
1698 static ssize_t
1699 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1700 const char *buf, size_t count)
1702 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1703 ulong type, speed;
1704 int oldspeed, rval;
1705 int mode = QLA_SET_DATA_RATE_LR;
1706 struct qla_hw_data *ha = vha->hw;
1708 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
1709 ql_log(ql_log_warn, vha, 0x70d8,
1710 "Speed setting not supported \n");
1711 return -EINVAL;
1714 rval = kstrtol(buf, 10, &type);
1715 if (rval)
1716 return rval;
1717 speed = type;
1718 if (type == 40 || type == 80 || type == 160 ||
1719 type == 320) {
1720 ql_dbg(ql_dbg_user, vha, 0x70d9,
1721 "Setting will be affected after a loss of sync\n");
1722 type = type/10;
1723 mode = QLA_SET_DATA_RATE_NOLR;
1726 oldspeed = ha->set_data_rate;
1728 switch (type) {
1729 case 0:
1730 ha->set_data_rate = PORT_SPEED_AUTO;
1731 break;
1732 case 4:
1733 ha->set_data_rate = PORT_SPEED_4GB;
1734 break;
1735 case 8:
1736 ha->set_data_rate = PORT_SPEED_8GB;
1737 break;
1738 case 16:
1739 ha->set_data_rate = PORT_SPEED_16GB;
1740 break;
1741 case 32:
1742 ha->set_data_rate = PORT_SPEED_32GB;
1743 break;
1744 default:
1745 ql_log(ql_log_warn, vha, 0x1199,
1746 "Unrecognized speed setting:%lx. Setting Autoneg\n",
1747 speed);
1748 ha->set_data_rate = PORT_SPEED_AUTO;
1751 if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1752 return -EINVAL;
1754 ql_log(ql_log_info, vha, 0x70da,
1755 "Setting speed to %lx Gbps \n", type);
1757 rval = qla2x00_set_data_rate(vha, mode);
1758 if (rval != QLA_SUCCESS)
1759 return -EIO;
1761 return strlen(buf);
1764 static ssize_t
1765 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1766 char *buf)
1768 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1769 struct qla_hw_data *ha = vha->hw;
1770 ssize_t rval;
1771 char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
1773 rval = qla2x00_get_data_rate(vha);
1774 if (rval != QLA_SUCCESS) {
1775 ql_log(ql_log_warn, vha, 0x70db,
1776 "Unable to get port speed rval:%zd\n", rval);
1777 return -EINVAL;
1780 ql_log(ql_log_info, vha, 0x70d6,
1781 "port speed:%d\n", ha->link_data_rate);
1783 return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
1786 /* ----- */
1788 static ssize_t
1789 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1791 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1792 int len = 0;
1794 len += scnprintf(buf + len, PAGE_SIZE-len,
1795 "Supported options: enabled | disabled | dual | exclusive\n");
1797 /* --- */
1798 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1800 switch (vha->qlini_mode) {
1801 case QLA2XXX_INI_MODE_EXCLUSIVE:
1802 len += scnprintf(buf + len, PAGE_SIZE-len,
1803 QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1804 break;
1805 case QLA2XXX_INI_MODE_DISABLED:
1806 len += scnprintf(buf + len, PAGE_SIZE-len,
1807 QLA2XXX_INI_MODE_STR_DISABLED);
1808 break;
1809 case QLA2XXX_INI_MODE_ENABLED:
1810 len += scnprintf(buf + len, PAGE_SIZE-len,
1811 QLA2XXX_INI_MODE_STR_ENABLED);
1812 break;
1813 case QLA2XXX_INI_MODE_DUAL:
1814 len += scnprintf(buf + len, PAGE_SIZE-len,
1815 QLA2XXX_INI_MODE_STR_DUAL);
1816 break;
1818 len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1820 return len;
1823 static char *mode_to_str[] = {
1824 "exclusive",
1825 "disabled",
1826 "enabled",
1827 "dual",
1830 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
1831 static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1833 int rc = 0;
1834 enum {
1835 NO_ACTION,
1836 MODE_CHANGE_ACCEPT,
1837 MODE_CHANGE_NO_ACTION,
1838 TARGET_STILL_ACTIVE,
1840 int action = NO_ACTION;
1841 int set_mode = 0;
1842 u8 eo_toggle = 0; /* exchange offload flipped */
1844 switch (vha->qlini_mode) {
1845 case QLA2XXX_INI_MODE_DISABLED:
1846 switch (op) {
1847 case QLA2XXX_INI_MODE_DISABLED:
1848 if (qla_tgt_mode_enabled(vha)) {
1849 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1850 vha->hw->flags.exchoffld_enabled)
1851 eo_toggle = 1;
1852 if (((vha->ql2xexchoffld !=
1853 vha->u_ql2xexchoffld) &&
1854 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1855 eo_toggle) {
1857 * The number of exchange to be offload
1858 * was tweaked or offload option was
1859 * flipped
1861 action = MODE_CHANGE_ACCEPT;
1862 } else {
1863 action = MODE_CHANGE_NO_ACTION;
1865 } else {
1866 action = MODE_CHANGE_NO_ACTION;
1868 break;
1869 case QLA2XXX_INI_MODE_EXCLUSIVE:
1870 if (qla_tgt_mode_enabled(vha)) {
1871 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1872 vha->hw->flags.exchoffld_enabled)
1873 eo_toggle = 1;
1874 if (((vha->ql2xexchoffld !=
1875 vha->u_ql2xexchoffld) &&
1876 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1877 eo_toggle) {
1879 * The number of exchange to be offload
1880 * was tweaked or offload option was
1881 * flipped
1883 action = MODE_CHANGE_ACCEPT;
1884 } else {
1885 action = MODE_CHANGE_NO_ACTION;
1887 } else {
1888 action = MODE_CHANGE_ACCEPT;
1890 break;
1891 case QLA2XXX_INI_MODE_DUAL:
1892 action = MODE_CHANGE_ACCEPT;
1893 /* active_mode is target only, reset it to dual */
1894 if (qla_tgt_mode_enabled(vha)) {
1895 set_mode = 1;
1896 action = MODE_CHANGE_ACCEPT;
1897 } else {
1898 action = MODE_CHANGE_NO_ACTION;
1900 break;
1902 case QLA2XXX_INI_MODE_ENABLED:
1903 if (qla_tgt_mode_enabled(vha))
1904 action = TARGET_STILL_ACTIVE;
1905 else {
1906 action = MODE_CHANGE_ACCEPT;
1907 set_mode = 1;
1909 break;
1911 break;
1913 case QLA2XXX_INI_MODE_EXCLUSIVE:
1914 switch (op) {
1915 case QLA2XXX_INI_MODE_EXCLUSIVE:
1916 if (qla_tgt_mode_enabled(vha)) {
1917 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1918 vha->hw->flags.exchoffld_enabled)
1919 eo_toggle = 1;
1920 if (((vha->ql2xexchoffld !=
1921 vha->u_ql2xexchoffld) &&
1922 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1923 eo_toggle)
1925 * The number of exchange to be offload
1926 * was tweaked or offload option was
1927 * flipped
1929 action = MODE_CHANGE_ACCEPT;
1930 else
1931 action = NO_ACTION;
1932 } else
1933 action = NO_ACTION;
1935 break;
1937 case QLA2XXX_INI_MODE_DISABLED:
1938 if (qla_tgt_mode_enabled(vha)) {
1939 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1940 vha->hw->flags.exchoffld_enabled)
1941 eo_toggle = 1;
1942 if (((vha->ql2xexchoffld !=
1943 vha->u_ql2xexchoffld) &&
1944 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1945 eo_toggle)
1946 action = MODE_CHANGE_ACCEPT;
1947 else
1948 action = MODE_CHANGE_NO_ACTION;
1949 } else
1950 action = MODE_CHANGE_NO_ACTION;
1951 break;
1953 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
1954 if (qla_tgt_mode_enabled(vha)) {
1955 action = MODE_CHANGE_ACCEPT;
1956 set_mode = 1;
1957 } else
1958 action = MODE_CHANGE_ACCEPT;
1959 break;
1961 case QLA2XXX_INI_MODE_ENABLED:
1962 if (qla_tgt_mode_enabled(vha))
1963 action = TARGET_STILL_ACTIVE;
1964 else {
1965 if (vha->hw->flags.fw_started)
1966 action = MODE_CHANGE_NO_ACTION;
1967 else
1968 action = MODE_CHANGE_ACCEPT;
1970 break;
1972 break;
1974 case QLA2XXX_INI_MODE_ENABLED:
1975 switch (op) {
1976 case QLA2XXX_INI_MODE_ENABLED:
1977 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
1978 vha->hw->flags.exchoffld_enabled)
1979 eo_toggle = 1;
1980 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
1981 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
1982 eo_toggle)
1983 action = MODE_CHANGE_ACCEPT;
1984 else
1985 action = NO_ACTION;
1986 break;
1987 case QLA2XXX_INI_MODE_DUAL:
1988 case QLA2XXX_INI_MODE_DISABLED:
1989 action = MODE_CHANGE_ACCEPT;
1990 break;
1991 default:
1992 action = MODE_CHANGE_NO_ACTION;
1993 break;
1995 break;
1997 case QLA2XXX_INI_MODE_DUAL:
1998 switch (op) {
1999 case QLA2XXX_INI_MODE_DUAL:
2000 if (qla_tgt_mode_enabled(vha) ||
2001 qla_dual_mode_enabled(vha)) {
2002 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2003 vha->u_ql2xiniexchg) !=
2004 vha->hw->flags.exchoffld_enabled)
2005 eo_toggle = 1;
2007 if ((((vha->ql2xexchoffld +
2008 vha->ql2xiniexchg) !=
2009 (vha->u_ql2xiniexchg +
2010 vha->u_ql2xexchoffld)) &&
2011 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2012 vha->u_ql2xexchoffld)) || eo_toggle)
2013 action = MODE_CHANGE_ACCEPT;
2014 else
2015 action = NO_ACTION;
2016 } else {
2017 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2018 vha->u_ql2xiniexchg) !=
2019 vha->hw->flags.exchoffld_enabled)
2020 eo_toggle = 1;
2022 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
2023 != (vha->u_ql2xiniexchg +
2024 vha->u_ql2xexchoffld)) &&
2025 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2026 vha->u_ql2xexchoffld)) || eo_toggle)
2027 action = MODE_CHANGE_NO_ACTION;
2028 else
2029 action = NO_ACTION;
2031 break;
2033 case QLA2XXX_INI_MODE_DISABLED:
2034 if (qla_tgt_mode_enabled(vha) ||
2035 qla_dual_mode_enabled(vha)) {
2036 /* turning off initiator mode */
2037 set_mode = 1;
2038 action = MODE_CHANGE_ACCEPT;
2039 } else {
2040 action = MODE_CHANGE_NO_ACTION;
2042 break;
2044 case QLA2XXX_INI_MODE_EXCLUSIVE:
2045 if (qla_tgt_mode_enabled(vha) ||
2046 qla_dual_mode_enabled(vha)) {
2047 set_mode = 1;
2048 action = MODE_CHANGE_ACCEPT;
2049 } else {
2050 action = MODE_CHANGE_ACCEPT;
2052 break;
2054 case QLA2XXX_INI_MODE_ENABLED:
2055 if (qla_tgt_mode_enabled(vha) ||
2056 qla_dual_mode_enabled(vha)) {
2057 action = TARGET_STILL_ACTIVE;
2058 } else {
2059 action = MODE_CHANGE_ACCEPT;
2062 break;
2065 switch (action) {
2066 case MODE_CHANGE_ACCEPT:
2067 ql_log(ql_log_warn, vha, 0xffff,
2068 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2069 mode_to_str[vha->qlini_mode], mode_to_str[op],
2070 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2071 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2073 vha->qlini_mode = op;
2074 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2075 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2076 if (set_mode)
2077 qlt_set_mode(vha);
2078 vha->flags.online = 1;
2079 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2080 break;
2082 case MODE_CHANGE_NO_ACTION:
2083 ql_log(ql_log_warn, vha, 0xffff,
2084 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2085 mode_to_str[vha->qlini_mode], mode_to_str[op],
2086 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2087 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2088 vha->qlini_mode = op;
2089 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2090 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2091 break;
2093 case TARGET_STILL_ACTIVE:
2094 ql_log(ql_log_warn, vha, 0xffff,
2095 "Target Mode is active. Unable to change Mode.\n");
2096 break;
2098 case NO_ACTION:
2099 default:
2100 ql_log(ql_log_warn, vha, 0xffff,
2101 "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
2102 vha->qlini_mode, op,
2103 vha->ql2xexchoffld, vha->u_ql2xexchoffld);
2104 break;
2107 return rc;
2110 static ssize_t
2111 qlini_mode_store(struct device *dev, struct device_attribute *attr,
2112 const char *buf, size_t count)
2114 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2115 int ini;
2117 if (!buf)
2118 return -EINVAL;
2120 if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2121 strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2122 ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2123 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2124 strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2125 ini = QLA2XXX_INI_MODE_DISABLED;
2126 else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2127 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2128 ini = QLA2XXX_INI_MODE_ENABLED;
2129 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2130 strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2131 ini = QLA2XXX_INI_MODE_DUAL;
2132 else
2133 return -EINVAL;
2135 qla_set_ini_mode(vha, ini);
2136 return strlen(buf);
2139 static ssize_t
2140 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2141 char *buf)
2143 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2144 int len = 0;
2146 len += scnprintf(buf + len, PAGE_SIZE-len,
2147 "target exchange: new %d : current: %d\n\n",
2148 vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2150 len += scnprintf(buf + len, PAGE_SIZE-len,
2151 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2152 vha->host_no);
2154 return len;
2157 static ssize_t
2158 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2159 const char *buf, size_t count)
2161 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2162 int val = 0;
2164 if (sscanf(buf, "%d", &val) != 1)
2165 return -EINVAL;
2167 if (val > FW_MAX_EXCHANGES_CNT)
2168 val = FW_MAX_EXCHANGES_CNT;
2169 else if (val < 0)
2170 val = 0;
2172 vha->u_ql2xexchoffld = val;
2173 return strlen(buf);
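/*
 * The store above clamps the requested value to [0, FW_MAX_EXCHANGES_CNT]
 * and only records it in u_ql2xexchoffld; as the _show handler's message
 * indicates, it is not pushed to the firmware until the operating mode is
 * (re)written via qlini_mode, at which point qla_set_ini_mode() copies it
 * into ql2xexchoffld.
 */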
2176 static ssize_t
2177 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2178 char *buf)
2180 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2181 int len = 0;
2183 len += scnprintf(buf + len, PAGE_SIZE-len,
2184 "target exchange: new %d : current: %d\n\n",
2185 vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2187 len += scnprintf(buf + len, PAGE_SIZE-len,
2188 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2189 vha->host_no);
2191 return len;
2194 static ssize_t
2195 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2196 const char *buf, size_t count)
2198 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2199 int val = 0;
2201 if (sscanf(buf, "%d", &val) != 1)
2202 return -EINVAL;
2204 if (val > FW_MAX_EXCHANGES_CNT)
2205 val = FW_MAX_EXCHANGES_CNT;
2206 else if (val < 0)
2207 val = 0;
2209 vha->u_ql2xiniexchg = val;
2210 return strlen(buf);
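/*
 * As with ql2xexchoffld above, the initiator exchange count is staged in
 * u_ql2xiniexchg and takes effect the next time qlini_mode is written.
 */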
2213 static ssize_t
2214 qla2x00_dif_bundle_statistics_show(struct device *dev,
2215 struct device_attribute *attr, char *buf)
2217 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2218 struct qla_hw_data *ha = vha->hw;
2220 return scnprintf(buf, PAGE_SIZE,
2221 "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2222 ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2223 ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2224 ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
2227 static ssize_t
2228 qla2x00_fw_attr_show(struct device *dev,
2229 struct device_attribute *attr, char *buf)
2231 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2232 struct qla_hw_data *ha = vha->hw;
2234 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2235 return scnprintf(buf, PAGE_SIZE, "\n");
2237 return scnprintf(buf, PAGE_SIZE, "%llx\n",
2238 (uint64_t)ha->fw_attributes_ext[1] << 48 |
2239 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2240 (uint64_t)ha->fw_attributes_h << 16 |
2241 (uint64_t)ha->fw_attributes);
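/*
 * The 64-bit value printed above is assembled from four 16-bit firmware
 * attribute words:
 *
 *   bits 63:48  fw_attributes_ext[1]
 *   bits 47:32  fw_attributes_ext[0]
 *   bits 31:16  fw_attributes_h
 *   bits 15:0   fw_attributes
 *
 * and is only reported for ISP27xx/28xx parts; other chips return an
 * empty line.
 */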
2244 static ssize_t
2245 qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
2246 char *buf)
2248 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2250 return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
2253 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
2254 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2255 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2256 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2257 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2258 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2259 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2260 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2261 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2262 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2263 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2264 qla2x00_zio_timer_store);
2265 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2266 qla2x00_beacon_store);
2267 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2268 qla2x00_optrom_bios_version_show, NULL);
2269 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2270 qla2x00_optrom_efi_version_show, NULL);
2271 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2272 qla2x00_optrom_fcode_version_show, NULL);
2273 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2274 NULL);
2275 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2276 qla2x00_optrom_gold_fw_version_show, NULL);
2277 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2278 NULL);
2279 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2280 NULL);
2281 static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
2282 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2283 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2284 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2285 NULL);
2286 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2287 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2288 qla2x00_vn_port_mac_address_show, NULL);
2289 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2290 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2291 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2292 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2293 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2294 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2295 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2296 qla2x00_allow_cna_fw_dump_show,
2297 qla2x00_allow_cna_fw_dump_store);
2298 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2299 static DEVICE_ATTR(min_supported_speed, 0444,
2300 qla2x00_min_supported_speed_show, NULL);
2301 static DEVICE_ATTR(max_supported_speed, 0444,
2302 qla2x00_max_supported_speed_show, NULL);
2303 static DEVICE_ATTR(zio_threshold, 0644,
2304 qla_zio_threshold_show,
2305 qla_zio_threshold_store);
2306 static DEVICE_ATTR_RW(qlini_mode);
2307 static DEVICE_ATTR_RW(ql2xexchoffld);
2308 static DEVICE_ATTR_RW(ql2xiniexchg);
2309 static DEVICE_ATTR(dif_bundle_statistics, 0444,
2310 qla2x00_dif_bundle_statistics_show, NULL);
2311 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2312 qla2x00_port_speed_store);
2313 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
2314 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
2317 struct device_attribute *qla2x00_host_attrs[] = {
2318 &dev_attr_driver_version,
2319 &dev_attr_fw_version,
2320 &dev_attr_serial_num,
2321 &dev_attr_isp_name,
2322 &dev_attr_isp_id,
2323 &dev_attr_model_name,
2324 &dev_attr_model_desc,
2325 &dev_attr_pci_info,
2326 &dev_attr_link_state,
2327 &dev_attr_zio,
2328 &dev_attr_zio_timer,
2329 &dev_attr_beacon,
2330 &dev_attr_optrom_bios_version,
2331 &dev_attr_optrom_efi_version,
2332 &dev_attr_optrom_fcode_version,
2333 &dev_attr_optrom_fw_version,
2334 &dev_attr_84xx_fw_version,
2335 &dev_attr_total_isp_aborts,
2336 &dev_attr_serdes_version,
2337 &dev_attr_mpi_version,
2338 &dev_attr_phy_version,
2339 &dev_attr_flash_block_size,
2340 &dev_attr_vlan_id,
2341 &dev_attr_vn_port_mac_address,
2342 &dev_attr_fabric_param,
2343 &dev_attr_fw_state,
2344 &dev_attr_optrom_gold_fw_version,
2345 &dev_attr_thermal_temp,
2346 &dev_attr_diag_requests,
2347 &dev_attr_diag_megabytes,
2348 &dev_attr_fw_dump_size,
2349 &dev_attr_allow_cna_fw_dump,
2350 &dev_attr_pep_version,
2351 &dev_attr_min_supported_speed,
2352 &dev_attr_max_supported_speed,
2353 &dev_attr_zio_threshold,
2354 &dev_attr_dif_bundle_statistics,
2355 &dev_attr_port_speed,
2356 &dev_attr_port_no,
2357 &dev_attr_fw_attr,
2358 NULL, /* reserve for qlini_mode */
2359 NULL, /* reserve for ql2xiniexchg */
2360 NULL, /* reserve for ql2xexchoffld */
2361 NULL,
2364 void qla_insert_tgt_attrs(void)
2366 struct device_attribute **attr;
2368 /* advance to empty slot */
2369 for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2370 continue;
2372 *attr = &dev_attr_qlini_mode;
2373 attr++;
2374 *attr = &dev_attr_ql2xiniexchg;
2375 attr++;
2376 *attr = &dev_attr_ql2xexchoffld;
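/*
 * qla_insert_tgt_attrs() fills the trailing NULL slots reserved at the end
 * of qla2x00_host_attrs[] with the target/initiator mode attributes
 * (qlini_mode, ql2xiniexchg, ql2xexchoffld). Presumably it is only called
 * when target mode support is available, so initiator-only configurations
 * never expose these sysfs nodes; the final NULL keeps the array
 * terminated.
 */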
2379 /* Host attributes. */
2381 static void
2382 qla2x00_get_host_port_id(struct Scsi_Host *shost)
2384 scsi_qla_host_t *vha = shost_priv(shost);
2386 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2387 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
2390 static void
2391 qla2x00_get_host_speed(struct Scsi_Host *shost)
2393 scsi_qla_host_t *vha = shost_priv(shost);
2394 u32 speed;
2396 if (IS_QLAFX00(vha->hw)) {
2397 qlafx00_get_host_speed(shost);
2398 return;
2401 switch (vha->hw->link_data_rate) {
2402 case PORT_SPEED_1GB:
2403 speed = FC_PORTSPEED_1GBIT;
2404 break;
2405 case PORT_SPEED_2GB:
2406 speed = FC_PORTSPEED_2GBIT;
2407 break;
2408 case PORT_SPEED_4GB:
2409 speed = FC_PORTSPEED_4GBIT;
2410 break;
2411 case PORT_SPEED_8GB:
2412 speed = FC_PORTSPEED_8GBIT;
2413 break;
2414 case PORT_SPEED_10GB:
2415 speed = FC_PORTSPEED_10GBIT;
2416 break;
2417 case PORT_SPEED_16GB:
2418 speed = FC_PORTSPEED_16GBIT;
2419 break;
2420 case PORT_SPEED_32GB:
2421 speed = FC_PORTSPEED_32GBIT;
2422 break;
2423 case PORT_SPEED_64GB:
2424 speed = FC_PORTSPEED_64GBIT;
2425 break;
2426 default:
2427 speed = FC_PORTSPEED_UNKNOWN;
2428 break;
2431 fc_host_speed(shost) = speed;
2434 static void
2435 qla2x00_get_host_port_type(struct Scsi_Host *shost)
2437 scsi_qla_host_t *vha = shost_priv(shost);
2438 uint32_t port_type;
2440 if (vha->vp_idx) {
2441 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2442 return;
2444 switch (vha->hw->current_topology) {
2445 case ISP_CFG_NL:
2446 port_type = FC_PORTTYPE_LPORT;
2447 break;
2448 case ISP_CFG_FL:
2449 port_type = FC_PORTTYPE_NLPORT;
2450 break;
2451 case ISP_CFG_N:
2452 port_type = FC_PORTTYPE_PTP;
2453 break;
2454 case ISP_CFG_F:
2455 port_type = FC_PORTTYPE_NPORT;
2456 break;
2457 default:
2458 port_type = FC_PORTTYPE_UNKNOWN;
2459 break;
2462 fc_host_port_type(shost) = port_type;
2465 static void
2466 qla2x00_get_starget_node_name(struct scsi_target *starget)
2468 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2469 scsi_qla_host_t *vha = shost_priv(host);
2470 fc_port_t *fcport;
2471 u64 node_name = 0;
2473 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2474 if (fcport->rport &&
2475 starget->id == fcport->rport->scsi_target_id) {
2476 node_name = wwn_to_u64(fcport->node_name);
2477 break;
2481 fc_starget_node_name(starget) = node_name;
2484 static void
2485 qla2x00_get_starget_port_name(struct scsi_target *starget)
2487 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2488 scsi_qla_host_t *vha = shost_priv(host);
2489 fc_port_t *fcport;
2490 u64 port_name = 0;
2492 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2493 if (fcport->rport &&
2494 starget->id == fcport->rport->scsi_target_id) {
2495 port_name = wwn_to_u64(fcport->port_name);
2496 break;
2500 fc_starget_port_name(starget) = port_name;
2503 static void
2504 qla2x00_get_starget_port_id(struct scsi_target *starget)
2506 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2507 scsi_qla_host_t *vha = shost_priv(host);
2508 fc_port_t *fcport;
2509 uint32_t port_id = ~0U;
2511 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2512 if (fcport->rport &&
2513 starget->id == fcport->rport->scsi_target_id) {
2514 port_id = fcport->d_id.b.domain << 16 |
2515 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2516 break;
2520 fc_starget_port_id(starget) = port_id;
2523 static inline void
2524 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2526 rport->dev_loss_tmo = timeout ? timeout : 1;
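/*
 * A requested dev_loss_tmo of zero is coerced to one second above,
 * presumably so the remote port always gets a non-zero grace period before
 * the transport tears it down.
 */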
2529 static void
2530 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2532 struct Scsi_Host *host = rport_to_shost(rport);
2533 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2534 unsigned long flags;
2536 if (!fcport)
2537 return;
2539 /* Now that the rport has been deleted, set the fcport state to
2540  * FCS_DEVICE_DEAD. */
2541 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2544 * Transport has effectively 'deleted' the rport, clear
2545 * all local references.
2547 spin_lock_irqsave(host->host_lock, flags);
2548 fcport->rport = fcport->drport = NULL;
2549 *((fc_port_t **)rport->dd_data) = NULL;
2550 spin_unlock_irqrestore(host->host_lock, flags);
2552 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2553 return;
2555 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2556 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2557 return;
2561 static void
2562 qla2x00_terminate_rport_io(struct fc_rport *rport)
2564 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2566 if (!fcport)
2567 return;
2569 if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2570 return;
2572 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2573 return;
2575 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2576 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2577 return;
2580 * At this point all fcport's software-states are cleared. Perform any
2581 * final cleanup of firmware resources (PCBs and XCBs).
2583 if (fcport->loop_id != FC_NO_LOOP_ID) {
2584 if (IS_FWI2_CAPABLE(fcport->vha->hw))
2585 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
2586 fcport->loop_id, fcport->d_id.b.domain,
2587 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2588 else
2589 qla2x00_port_logout(fcport->vha, fcport);
2593 static int
2594 qla2x00_issue_lip(struct Scsi_Host *shost)
2596 scsi_qla_host_t *vha = shost_priv(shost);
2598 if (IS_QLAFX00(vha->hw))
2599 return 0;
2601 qla2x00_loop_reset(vha);
2602 return 0;
2605 static struct fc_host_statistics *
2606 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2608 scsi_qla_host_t *vha = shost_priv(shost);
2609 struct qla_hw_data *ha = vha->hw;
2610 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2611 int rval;
2612 struct link_statistics *stats;
2613 dma_addr_t stats_dma;
2614 struct fc_host_statistics *p = &vha->fc_host_stat;
2616 memset(p, -1, sizeof(*p));
2618 if (IS_QLAFX00(vha->hw))
2619 goto done;
2621 if (test_bit(UNLOADING, &vha->dpc_flags))
2622 goto done;
2624 if (unlikely(pci_channel_offline(ha->pdev)))
2625 goto done;
2627 if (qla2x00_chip_is_down(vha))
2628 goto done;
2630 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2631 GFP_KERNEL);
2632 if (!stats) {
2633 ql_log(ql_log_warn, vha, 0x707d,
2634 "Failed to allocate memory for stats.\n");
2635 goto done;
2638 rval = QLA_FUNCTION_FAILED;
2639 if (IS_FWI2_CAPABLE(ha)) {
2640 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2641 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2642 !ha->dpc_active) {
2643 /* Must be in a 'READY' state for statistics retrieval. */
2644 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2645 stats, stats_dma);
2648 if (rval != QLA_SUCCESS)
2649 goto done_free;
2651 p->link_failure_count = stats->link_fail_cnt;
2652 p->loss_of_sync_count = stats->loss_sync_cnt;
2653 p->loss_of_signal_count = stats->loss_sig_cnt;
2654 p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
2655 p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
2656 p->invalid_crc_count = stats->inval_crc_cnt;
2657 if (IS_FWI2_CAPABLE(ha)) {
2658 p->lip_count = stats->lip_cnt;
2659 p->tx_frames = stats->tx_frames;
2660 p->rx_frames = stats->rx_frames;
2661 p->dumped_frames = stats->discarded_frames;
2662 p->nos_count = stats->nos_rcvd;
2663 p->error_frames =
2664 stats->dropped_frames + stats->discarded_frames;
2665 p->rx_words = vha->qla_stats.input_bytes;
2666 p->tx_words = vha->qla_stats.output_bytes;
2668 p->fcp_control_requests = vha->qla_stats.control_requests;
2669 p->fcp_input_requests = vha->qla_stats.input_requests;
2670 p->fcp_output_requests = vha->qla_stats.output_requests;
2671 p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
2672 p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
2673 p->seconds_since_last_reset =
2674 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2675 do_div(p->seconds_since_last_reset, HZ);
2677 done_free:
2678 dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
2679 stats, stats_dma);
2680 done:
2681 return p;
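/*
 * Flow of qla2x00_get_fc_host_stats() above: the fc_host_statistics buffer
 * is first filled with -1 so any counter the driver does not update reads
 * back as all ones, early-outs cover unload, PCI channel offline and
 * chip-down states, a DMA-coherent link_statistics buffer is then queried
 * either through qla24xx_get_isp_stats() (FWI2-capable HBAs) or the legacy
 * qla2x00_get_link_status() path (which requires a READY loop state), and
 * the results are copied into the transport structure before the DMA
 * buffer is freed.
 */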
2684 static void
2685 qla2x00_reset_host_stats(struct Scsi_Host *shost)
2687 scsi_qla_host_t *vha = shost_priv(shost);
2688 struct qla_hw_data *ha = vha->hw;
2689 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2690 struct link_statistics *stats;
2691 dma_addr_t stats_dma;
2693 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2694 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2696 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2698 if (IS_FWI2_CAPABLE(ha)) {
2699 stats = dma_alloc_coherent(&ha->pdev->dev,
2700 sizeof(*stats), &stats_dma, GFP_KERNEL);
2701 if (!stats) {
2702 ql_log(ql_log_warn, vha, 0x70d7,
2703 "Failed to allocate memory for stats.\n");
2704 return;
2707 /* reset firmware statistics */
2708 qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2710 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2711 stats, stats_dma);
2715 static void
2716 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2718 scsi_qla_host_t *vha = shost_priv(shost);
2720 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2721 sizeof(fc_host_symbolic_name(shost)));
2724 static void
2725 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2727 scsi_qla_host_t *vha = shost_priv(shost);
2729 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
2732 static void
2733 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2735 scsi_qla_host_t *vha = shost_priv(shost);
2736 static const uint8_t node_name[WWN_SIZE] = {
2737 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
2739 u64 fabric_name = wwn_to_u64(node_name);
2741 if (vha->device_flags & SWITCH_FOUND)
2742 fabric_name = wwn_to_u64(vha->fabric_node_name);
2744 fc_host_fabric_name(shost) = fabric_name;
2747 static void
2748 qla2x00_get_host_port_state(struct Scsi_Host *shost)
2750 scsi_qla_host_t *vha = shost_priv(shost);
2751 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2753 if (!base_vha->flags.online) {
2754 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2755 return;
2758 switch (atomic_read(&base_vha->loop_state)) {
2759 case LOOP_UPDATE:
2760 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2761 break;
2762 case LOOP_DOWN:
2763 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
2764 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2765 else
2766 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2767 break;
2768 case LOOP_DEAD:
2769 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2770 break;
2771 case LOOP_READY:
2772 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2773 break;
2774 default:
2775 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
2776 break;
2780 static int
2781 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
2783 int ret = 0;
2784 uint8_t qos = 0;
2785 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
2786 scsi_qla_host_t *vha = NULL;
2787 struct qla_hw_data *ha = base_vha->hw;
2788 int cnt;
2789 struct req_que *req = ha->req_q_map[0];
2790 struct qla_qpair *qpair;
2792 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2793 if (ret) {
2794 ql_log(ql_log_warn, vha, 0x707e,
2795 "Vport sanity check failed, status %x\n", ret);
2796 return (ret);
2799 vha = qla24xx_create_vhost(fc_vport);
2800 if (vha == NULL) {
2801 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2802 return FC_VPORT_FAILED;
2804 if (disable) {
2805 atomic_set(&vha->vp_state, VP_OFFLINE);
2806 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2807 } else
2808 atomic_set(&vha->vp_state, VP_FAILED);
2810 /* ready to create vport */
2811 ql_log(ql_log_info, vha, 0x7080,
2812 "VP entry id %d assigned.\n", vha->vp_idx);
2814 /* initialize vport states */
2815 atomic_set(&vha->loop_state, LOOP_DOWN);
2816 vha->vp_err_state = VP_ERR_PORTDWN;
2817 vha->vp_prev_err_state = VP_ERR_UNKWN;
2818 /* Check if physical ha port is Up */
2819 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
2820 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2821 /* Don't retry or attempt login of this virtual port */
2822 ql_dbg(ql_dbg_user, vha, 0x7081,
2823 "Vport loop state is not UP.\n");
2824 atomic_set(&vha->loop_state, LOOP_DEAD);
2825 if (!disable)
2826 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
2829 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2830 if (ha->fw_attributes & BIT_4) {
2831 int prot = 0, guard;
2833 vha->flags.difdix_supported = 1;
2834 ql_dbg(ql_dbg_user, vha, 0x7082,
2835 "Registered for DIF/DIX type 1 and 3 protection.\n");
2836 if (ql2xenabledif == 1)
2837 prot = SHOST_DIX_TYPE0_PROTECTION;
2838 scsi_host_set_prot(vha->host,
2839 prot | SHOST_DIF_TYPE1_PROTECTION
2840 | SHOST_DIF_TYPE2_PROTECTION
2841 | SHOST_DIF_TYPE3_PROTECTION
2842 | SHOST_DIX_TYPE1_PROTECTION
2843 | SHOST_DIX_TYPE2_PROTECTION
2844 | SHOST_DIX_TYPE3_PROTECTION);
2846 guard = SHOST_DIX_GUARD_CRC;
2848 if (IS_PI_IPGUARD_CAPABLE(ha) &&
2849 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2850 guard |= SHOST_DIX_GUARD_IP;
2852 scsi_host_set_guard(vha->host, guard);
2853 } else
2854 vha->flags.difdix_supported = 0;
2857 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
2858 &ha->pdev->dev)) {
2859 ql_dbg(ql_dbg_user, vha, 0x7083,
2860 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
2861 goto vport_create_failed_2;
2864 /* initialize attributes */
2865 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2866 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2867 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2868 fc_host_supported_classes(vha->host) =
2869 fc_host_supported_classes(base_vha->host);
2870 fc_host_supported_speeds(vha->host) =
2871 fc_host_supported_speeds(base_vha->host);
2873 qlt_vport_create(vha, ha);
2874 qla24xx_vport_disable(fc_vport, disable);
2876 if (!ql2xmqsupport || !ha->npiv_info)
2877 goto vport_queue;
2879 /* Create a request queue in QoS mode for the vport */
2880 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
2881 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
2882 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
2883 8) == 0) {
2884 qos = ha->npiv_info[cnt].q_qos;
2885 break;
2889 if (qos) {
2890 qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
2891 if (!qpair)
2892 ql_log(ql_log_warn, vha, 0x7084,
2893 "Can't create qpair for VP[%d]\n",
2894 vha->vp_idx);
2895 else {
2896 ql_dbg(ql_dbg_multiq, vha, 0xc001,
2897 "Queue pair: %d Qos: %d) created for VP[%d]\n",
2898 qpair->id, qos, vha->vp_idx);
2899 ql_dbg(ql_dbg_user, vha, 0x7085,
2900 "Queue Pair: %d Qos: %d) created for VP[%d]\n",
2901 qpair->id, qos, vha->vp_idx);
2902 req = qpair->req;
2903 vha->qpair = qpair;
2907 vport_queue:
2908 vha->req = req;
2909 return 0;
2911 vport_create_failed_2:
2912 qla24xx_disable_vp(vha);
2913 qla24xx_deallocate_vp_id(vha);
2914 scsi_host_put(vha->host);
2915 return FC_VPORT_FAILED;
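/*
 * vport creation above, in order: sanity-check the request, allocate the
 * vhost, mark it offline or failed depending on the 'disable' flag, check
 * that the physical port's loop state is up, optionally enable DIF/DIX
 * protection when the HBA and ql2xenabledif allow it, register the SCSI
 * host, copy the FC host attributes from the physical port, and (when
 * multiqueue support and NPIV QoS info are available) create a dedicated
 * qpair for the vport; on scsi_add_host failure the vport is disabled, its
 * id released and the host reference dropped.
 */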
2918 static int
2919 qla24xx_vport_delete(struct fc_vport *fc_vport)
2921 scsi_qla_host_t *vha = fc_vport->dd_data;
2922 struct qla_hw_data *ha = vha->hw;
2923 uint16_t id = vha->vp_idx;
2925 set_bit(VPORT_DELETE, &vha->dpc_flags);
2927 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
2928 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
2929 msleep(1000);
2931 qla_nvme_delete(vha);
2933 qla24xx_disable_vp(vha);
2934 qla2x00_wait_for_sess_deletion(vha);
2936 vha->flags.delete_progress = 1;
2938 qlt_remove_target(ha, vha);
2940 fc_remove_host(vha->host);
2942 scsi_remove_host(vha->host);
2944 /* Allow the timer to run to drain queued items when removing the vp */
2945 qla24xx_deallocate_vp_id(vha);
2947 if (vha->timer_active) {
2948 qla2x00_vp_stop_timer(vha);
2949 ql_dbg(ql_dbg_user, vha, 0x7086,
2950 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
2953 qla2x00_free_fcports(vha);
2955 mutex_lock(&ha->vport_lock);
2956 ha->cur_vport_count--;
2957 clear_bit(vha->vp_idx, ha->vp_idx_map);
2958 mutex_unlock(&ha->vport_lock);
2960 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
2961 vha->gnl.ldma);
2963 vha->gnl.l = NULL;
2965 vfree(vha->scan.l);
2967 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
2968 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
2969 ql_log(ql_log_warn, vha, 0x7087,
2970 "Queue Pair delete failed.\n");
2973 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
2974 scsi_host_put(vha->host);
2975 return 0;
2978 static int
2979 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
2981 scsi_qla_host_t *vha = fc_vport->dd_data;
2983 if (disable)
2984 qla24xx_disable_vp(vha);
2985 else
2986 qla24xx_enable_vp(vha);
2988 return 0;
2991 struct fc_function_template qla2xxx_transport_functions = {
2993 .show_host_node_name = 1,
2994 .show_host_port_name = 1,
2995 .show_host_supported_classes = 1,
2996 .show_host_supported_speeds = 1,
2998 .get_host_port_id = qla2x00_get_host_port_id,
2999 .show_host_port_id = 1,
3000 .get_host_speed = qla2x00_get_host_speed,
3001 .show_host_speed = 1,
3002 .get_host_port_type = qla2x00_get_host_port_type,
3003 .show_host_port_type = 1,
3004 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3005 .show_host_symbolic_name = 1,
3006 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3007 .show_host_system_hostname = 1,
3008 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3009 .show_host_fabric_name = 1,
3010 .get_host_port_state = qla2x00_get_host_port_state,
3011 .show_host_port_state = 1,
3013 .dd_fcrport_size = sizeof(struct fc_port *),
3014 .show_rport_supported_classes = 1,
3016 .get_starget_node_name = qla2x00_get_starget_node_name,
3017 .show_starget_node_name = 1,
3018 .get_starget_port_name = qla2x00_get_starget_port_name,
3019 .show_starget_port_name = 1,
3020 .get_starget_port_id = qla2x00_get_starget_port_id,
3021 .show_starget_port_id = 1,
3023 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3024 .show_rport_dev_loss_tmo = 1,
3026 .issue_fc_host_lip = qla2x00_issue_lip,
3027 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3028 .terminate_rport_io = qla2x00_terminate_rport_io,
3029 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3030 .reset_fc_host_stats = qla2x00_reset_host_stats,
3032 .vport_create = qla24xx_vport_create,
3033 .vport_disable = qla24xx_vport_disable,
3034 .vport_delete = qla24xx_vport_delete,
3035 .bsg_request = qla24xx_bsg_request,
3036 .bsg_timeout = qla24xx_bsg_timeout,
3039 struct fc_function_template qla2xxx_transport_vport_functions = {
3041 .show_host_node_name = 1,
3042 .show_host_port_name = 1,
3043 .show_host_supported_classes = 1,
3045 .get_host_port_id = qla2x00_get_host_port_id,
3046 .show_host_port_id = 1,
3047 .get_host_speed = qla2x00_get_host_speed,
3048 .show_host_speed = 1,
3049 .get_host_port_type = qla2x00_get_host_port_type,
3050 .show_host_port_type = 1,
3051 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3052 .show_host_symbolic_name = 1,
3053 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3054 .show_host_system_hostname = 1,
3055 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3056 .show_host_fabric_name = 1,
3057 .get_host_port_state = qla2x00_get_host_port_state,
3058 .show_host_port_state = 1,
3060 .dd_fcrport_size = sizeof(struct fc_port *),
3061 .show_rport_supported_classes = 1,
3063 .get_starget_node_name = qla2x00_get_starget_node_name,
3064 .show_starget_node_name = 1,
3065 .get_starget_port_name = qla2x00_get_starget_port_name,
3066 .show_starget_port_name = 1,
3067 .get_starget_port_id = qla2x00_get_starget_port_id,
3068 .show_starget_port_id = 1,
3070 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3071 .show_rport_dev_loss_tmo = 1,
3073 .issue_fc_host_lip = qla2x00_issue_lip,
3074 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3075 .terminate_rport_io = qla2x00_terminate_rport_io,
3076 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3077 .reset_fc_host_stats = qla2x00_reset_host_stats,
3079 .bsg_request = qla24xx_bsg_request,
3080 .bsg_timeout = qla24xx_bsg_timeout,
3083 void
3084 qla2x00_init_host_attr(scsi_qla_host_t *vha)
3086 struct qla_hw_data *ha = vha->hw;
3087 u32 speeds = FC_PORTSPEED_UNKNOWN;
3089 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3090 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3091 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3092 fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
3093 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
3094 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
3095 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
3097 if (IS_CNA_CAPABLE(ha))
3098 speeds = FC_PORTSPEED_10GBIT;
3099 else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
3100 if (ha->max_supported_speed == 2) {
3101 if (ha->min_supported_speed <= 6)
3102 speeds |= FC_PORTSPEED_64GBIT;
3104 if (ha->max_supported_speed == 2 ||
3105 ha->max_supported_speed == 1) {
3106 if (ha->min_supported_speed <= 5)
3107 speeds |= FC_PORTSPEED_32GBIT;
3109 if (ha->max_supported_speed == 2 ||
3110 ha->max_supported_speed == 1 ||
3111 ha->max_supported_speed == 0) {
3112 if (ha->min_supported_speed <= 4)
3113 speeds |= FC_PORTSPEED_16GBIT;
3115 if (ha->max_supported_speed == 1 ||
3116 ha->max_supported_speed == 0) {
3117 if (ha->min_supported_speed <= 3)
3118 speeds |= FC_PORTSPEED_8GBIT;
3120 if (ha->max_supported_speed == 0) {
3121 if (ha->min_supported_speed <= 2)
3122 speeds |= FC_PORTSPEED_4GBIT;
3124 } else if (IS_QLA2031(ha))
3125 speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
3126 FC_PORTSPEED_4GBIT;
3127 else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
3128 speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
3129 FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3130 else if (IS_QLA24XX_TYPE(ha))
3131 speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
3132 FC_PORTSPEED_1GBIT;
3133 else if (IS_QLA23XX(ha))
3134 speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3135 else
3136 speeds = FC_PORTSPEED_1GBIT;
3138 fc_host_supported_speeds(vha->host) = speeds;
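/*
 * For ISP27xx/28xx the supported-speed mask above is derived from the
 * firmware-reported range: max_supported_speed 0/1/2 corresponds to a top
 * speed of 16/32/64 Gb/s, and min_supported_speed 2..6 corresponds to a
 * floor of 4/8/16/32/64 Gb/s, so every speed between the two (inclusive)
 * is advertised to the FC transport.
 */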