Linux 5.1.15
drivers/scsi/qla2xxx/qla_attr.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
10 #include <linux/kthread.h>
11 #include <linux/vmalloc.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
15 static int qla24xx_vport_disable(struct fc_vport *, bool);
17 /* SYSFS attributes --------------------------------------------------------- */
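/*
 * The read/write handlers below back the per-host binary sysfs files
 * (fw_dump, nvram, optrom, optrom_ctl, vpd, sfp, reset, issue_logo,
 * xgmac_stats, dcbx_tlv).  They are registered against the Scsi_Host's
 * shost_gendev kobject by qla2x00_alloc_sysfs_attr() through the
 * bin_file_entries[] table at the end of this block.
 */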
19 static ssize_t
20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 struct bin_attribute *bin_attr,
22 char *buf, loff_t off, size_t count)
24 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25 struct device, kobj)));
26 struct qla_hw_data *ha = vha->hw;
27 int rval = 0;
29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 return 0;
32 if (IS_P3P_TYPE(ha)) {
33 if (off < ha->md_template_size) {
34 rval = memory_read_from_buffer(buf, count,
35 &off, ha->md_tmplt_hdr, ha->md_template_size);
36 return rval;
38 off -= ha->md_template_size;
39 rval = memory_read_from_buffer(buf, count,
40 &off, ha->md_dump, ha->md_dump_size);
41 return rval;
42 } else if (ha->mctp_dumped && ha->mctp_dump_reading)
43 return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 MCTP_DUMP_SIZE);
45 else if (ha->fw_dump_reading)
46 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
47 ha->fw_dump_len);
48 else
49 return 0;
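/*
 * Control values accepted by the fw_dump write handler below:
 *   0 - clear a completed firmware dump (and re-arm the P3P minidump)
 *   1 - mark an existing dump readable through this attribute
 *   2 - (re)allocate the firmware dump buffer
 *   3 - trigger a dump by forcing a system error (on P3P parts, by
 *       taking IDC reset ownership)
 *   4 - log whether a P3P MiniDump template is present
 *   5 - schedule an ISP abort on P3P parts
 *   6 - clear the MCTP dump
 *   7 - mark an MCTP dump readable
 */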
52 static ssize_t
53 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
54 struct bin_attribute *bin_attr,
55 char *buf, loff_t off, size_t count)
57 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
58 struct device, kobj)));
59 struct qla_hw_data *ha = vha->hw;
60 int reading;
62 if (off != 0)
63 return (0);
65 reading = simple_strtol(buf, NULL, 10);
66 switch (reading) {
67 case 0:
68 if (!ha->fw_dump_reading)
69 break;
71 ql_log(ql_log_info, vha, 0x705d,
72 "Firmware dump cleared on (%ld).\n", vha->host_no);
74 if (IS_P3P_TYPE(ha)) {
75 qla82xx_md_free(vha);
76 qla82xx_md_prep(vha);
78 ha->fw_dump_reading = 0;
79 ha->fw_dumped = 0;
80 break;
81 case 1:
82 if (ha->fw_dumped && !ha->fw_dump_reading) {
83 ha->fw_dump_reading = 1;
85 ql_log(ql_log_info, vha, 0x705e,
86 "Raw firmware dump ready for read on (%ld).\n",
87 vha->host_no);
89 break;
90 case 2:
91 qla2x00_alloc_fw_dump(vha);
92 break;
93 case 3:
94 if (IS_QLA82XX(ha)) {
95 qla82xx_idc_lock(ha);
96 qla82xx_set_reset_owner(vha);
97 qla82xx_idc_unlock(ha);
98 } else if (IS_QLA8044(ha)) {
99 qla8044_idc_lock(ha);
100 qla82xx_set_reset_owner(vha);
101 qla8044_idc_unlock(ha);
102 } else
103 qla2x00_system_error(vha);
104 break;
105 case 4:
106 if (IS_P3P_TYPE(ha)) {
107 if (ha->md_tmplt_hdr)
108 ql_dbg(ql_dbg_user, vha, 0x705b,
109 "MiniDump supported with this firmware.\n");
110 else
111 ql_dbg(ql_dbg_user, vha, 0x709d,
112 "MiniDump not supported with this firmware.\n");
114 break;
115 case 5:
116 if (IS_P3P_TYPE(ha))
117 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
118 break;
119 case 6:
120 if (!ha->mctp_dump_reading)
121 break;
122 ql_log(ql_log_info, vha, 0x70c1,
123 "MCTP dump cleared on (%ld).\n", vha->host_no);
124 ha->mctp_dump_reading = 0;
125 ha->mctp_dumped = 0;
126 break;
127 case 7:
128 if (ha->mctp_dumped && !ha->mctp_dump_reading) {
129 ha->mctp_dump_reading = 1;
130 ql_log(ql_log_info, vha, 0x70c2,
131 "Raw mctp dump ready for read on (%ld).\n",
132 vha->host_no);
134 break;
136 return count;
139 static struct bin_attribute sysfs_fw_dump_attr = {
140 .attr = {
141 .name = "fw_dump",
142 .mode = S_IRUSR | S_IWUSR,
144 .size = 0,
145 .read = qla2x00_sysfs_read_fw_dump,
146 .write = qla2x00_sysfs_write_fw_dump,
149 static ssize_t
150 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
151 struct bin_attribute *bin_attr,
152 char *buf, loff_t off, size_t count)
154 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
155 struct device, kobj)));
156 struct qla_hw_data *ha = vha->hw;
158 if (!capable(CAP_SYS_ADMIN))
159 return 0;
161 mutex_lock(&ha->optrom_mutex);
162 if (qla2x00_chip_is_down(vha)) {
163 mutex_unlock(&ha->optrom_mutex);
164 return -EAGAIN;
167 if (IS_NOCACHE_VPD_TYPE(ha))
168 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
169 ha->nvram_size);
170 mutex_unlock(&ha->optrom_mutex);
172 return memory_read_from_buffer(buf, count, &off, ha->nvram,
173 ha->nvram_size);
176 static ssize_t
177 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
178 struct bin_attribute *bin_attr,
179 char *buf, loff_t off, size_t count)
181 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
182 struct device, kobj)));
183 struct qla_hw_data *ha = vha->hw;
184 uint16_t cnt;
186 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
187 !ha->isp_ops->write_nvram)
188 return -EINVAL;
190 /* Checksum NVRAM. */
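/*
 * The last 32-bit word (FWI-2 capable ISPs) or last byte (earlier ISPs)
 * of the image is rewritten with the two's complement of the sum of the
 * preceding words/bytes, so the stored NVRAM image sums to zero.
 */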
191 if (IS_FWI2_CAPABLE(ha)) {
192 uint32_t *iter;
193 uint32_t chksum;
195 iter = (uint32_t *)buf;
196 chksum = 0;
197 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
198 chksum += le32_to_cpu(*iter);
199 chksum = ~chksum + 1;
200 *iter = cpu_to_le32(chksum);
201 } else {
202 uint8_t *iter;
203 uint8_t chksum;
205 iter = (uint8_t *)buf;
206 chksum = 0;
207 for (cnt = 0; cnt < count - 1; cnt++)
208 chksum += *iter++;
209 chksum = ~chksum + 1;
210 *iter = chksum;
213 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
214 ql_log(ql_log_warn, vha, 0x705f,
215 "HBA not online, failing NVRAM update.\n");
216 return -EAGAIN;
219 mutex_lock(&ha->optrom_mutex);
220 if (qla2x00_chip_is_down(vha)) {
221 mutex_unlock(&ha->optrom_mutex);
222 return -EAGAIN;
225 /* Write NVRAM. */
226 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
227 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
228 count);
229 mutex_unlock(&ha->optrom_mutex);
231 ql_dbg(ql_dbg_user, vha, 0x7060,
232 "Setting ISP_ABORT_NEEDED\n");
233 /* NVRAM settings take effect immediately. */
234 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
235 qla2xxx_wake_dpc(vha);
236 qla2x00_wait_for_chip_reset(vha);
238 return count;
241 static struct bin_attribute sysfs_nvram_attr = {
242 .attr = {
243 .name = "nvram",
244 .mode = S_IRUSR | S_IWUSR,
246 .size = 512,
247 .read = qla2x00_sysfs_read_nvram,
248 .write = qla2x00_sysfs_write_nvram,
251 static ssize_t
252 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
253 struct bin_attribute *bin_attr,
254 char *buf, loff_t off, size_t count)
256 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
257 struct device, kobj)));
258 struct qla_hw_data *ha = vha->hw;
259 ssize_t rval = 0;
261 mutex_lock(&ha->optrom_mutex);
263 if (ha->optrom_state != QLA_SREADING)
264 goto out;
266 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
267 ha->optrom_region_size);
269 out:
270 mutex_unlock(&ha->optrom_mutex);
272 return rval;
275 static ssize_t
276 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
277 struct bin_attribute *bin_attr,
278 char *buf, loff_t off, size_t count)
280 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
281 struct device, kobj)));
282 struct qla_hw_data *ha = vha->hw;
284 mutex_lock(&ha->optrom_mutex);
286 if (ha->optrom_state != QLA_SWRITING) {
287 mutex_unlock(&ha->optrom_mutex);
288 return -EINVAL;
290 if (off > ha->optrom_region_size) {
291 mutex_unlock(&ha->optrom_mutex);
292 return -ERANGE;
294 if (off + count > ha->optrom_region_size)
295 count = ha->optrom_region_size - off;
297 memcpy(&ha->optrom_buffer[off], buf, count);
298 mutex_unlock(&ha->optrom_mutex);
300 return count;
303 static struct bin_attribute sysfs_optrom_attr = {
304 .attr = {
305 .name = "optrom",
306 .mode = S_IRUSR | S_IWUSR,
308 .size = 0,
309 .read = qla2x00_sysfs_read_optrom,
310 .write = qla2x00_sysfs_write_optrom,
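/*
 * optrom_ctl accepts "<op>[:<start>:<size>]" (op decimal, start/size hex;
 * start/size default to 0 and the full option-ROM size):
 *   0 - free the staging buffer
 *   1 - read the given flash region into the buffer (then readable via 'optrom')
 *   2 - stage a write; data is copied in through writes to 'optrom'
 *   3 - burn the staged buffer to flash
 */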
313 static ssize_t
314 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
315 struct bin_attribute *bin_attr,
316 char *buf, loff_t off, size_t count)
318 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
319 struct device, kobj)));
320 struct qla_hw_data *ha = vha->hw;
321 uint32_t start = 0;
322 uint32_t size = ha->optrom_size;
323 int val, valid;
324 ssize_t rval = count;
326 if (off)
327 return -EINVAL;
329 if (unlikely(pci_channel_offline(ha->pdev)))
330 return -EAGAIN;
332 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
333 return -EINVAL;
334 if (start > ha->optrom_size)
335 return -EINVAL;
336 if (size > ha->optrom_size - start)
337 size = ha->optrom_size - start;
339 mutex_lock(&ha->optrom_mutex);
340 if (qla2x00_chip_is_down(vha)) {
341 mutex_unlock(&ha->optrom_mutex);
342 return -EAGAIN;
344 switch (val) {
345 case 0:
346 if (ha->optrom_state != QLA_SREADING &&
347 ha->optrom_state != QLA_SWRITING) {
348 rval = -EINVAL;
349 goto out;
351 ha->optrom_state = QLA_SWAITING;
353 ql_dbg(ql_dbg_user, vha, 0x7061,
354 "Freeing flash region allocation -- 0x%x bytes.\n",
355 ha->optrom_region_size);
357 vfree(ha->optrom_buffer);
358 ha->optrom_buffer = NULL;
359 break;
360 case 1:
361 if (ha->optrom_state != QLA_SWAITING) {
362 rval = -EINVAL;
363 goto out;
366 ha->optrom_region_start = start;
367 ha->optrom_region_size = size;
369 ha->optrom_state = QLA_SREADING;
370 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
371 if (ha->optrom_buffer == NULL) {
372 ql_log(ql_log_warn, vha, 0x7062,
373 "Unable to allocate memory for optrom retrieval "
374 "(%x).\n", ha->optrom_region_size);
376 ha->optrom_state = QLA_SWAITING;
377 rval = -ENOMEM;
378 goto out;
381 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
382 ql_log(ql_log_warn, vha, 0x7063,
383 "HBA not online, failing NVRAM update.\n");
384 rval = -EAGAIN;
385 goto out;
388 ql_dbg(ql_dbg_user, vha, 0x7064,
389 "Reading flash region -- 0x%x/0x%x.\n",
390 ha->optrom_region_start, ha->optrom_region_size);
392 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
393 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
394 ha->optrom_region_start, ha->optrom_region_size);
395 break;
396 case 2:
397 if (ha->optrom_state != QLA_SWAITING) {
398 rval = -EINVAL;
399 goto out;
402 /*
403 * We need to be more restrictive on which FLASH regions are
404 * allowed to be updated via user-space. Regions accessible
405 * via this method include:
407 * ISP21xx/ISP22xx/ISP23xx type boards:
409 * 0x000000 -> 0x020000 -- Boot code.
411 * ISP2322/ISP24xx type boards:
413 * 0x000000 -> 0x07ffff -- Boot code.
414 * 0x080000 -> 0x0fffff -- Firmware.
416 * ISP25xx type boards:
418 * 0x000000 -> 0x07ffff -- Boot code.
419 * 0x080000 -> 0x0fffff -- Firmware.
420 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
421 */
422 valid = 0;
423 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
424 valid = 1;
425 else if (start == (ha->flt_region_boot * 4) ||
426 start == (ha->flt_region_fw * 4))
427 valid = 1;
428 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
429 || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
430 || IS_QLA27XX(ha))
431 valid = 1;
432 if (!valid) {
433 ql_log(ql_log_warn, vha, 0x7065,
434 "Invalid start region 0x%x/0x%x.\n", start, size);
435 rval = -EINVAL;
436 goto out;
439 ha->optrom_region_start = start;
440 ha->optrom_region_size = size;
442 ha->optrom_state = QLA_SWRITING;
443 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
444 if (ha->optrom_buffer == NULL) {
445 ql_log(ql_log_warn, vha, 0x7066,
446 "Unable to allocate memory for optrom update "
447 "(%x)\n", ha->optrom_region_size);
449 ha->optrom_state = QLA_SWAITING;
450 rval = -ENOMEM;
451 goto out;
454 ql_dbg(ql_dbg_user, vha, 0x7067,
455 "Staging flash region write -- 0x%x/0x%x.\n",
456 ha->optrom_region_start, ha->optrom_region_size);
458 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
459 break;
460 case 3:
461 if (ha->optrom_state != QLA_SWRITING) {
462 rval = -EINVAL;
463 goto out;
466 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
467 ql_log(ql_log_warn, vha, 0x7068,
468 "HBA not online, failing flash update.\n");
469 rval = -EAGAIN;
470 goto out;
473 ql_dbg(ql_dbg_user, vha, 0x7069,
474 "Writing flash region -- 0x%x/0x%x.\n",
475 ha->optrom_region_start, ha->optrom_region_size);
477 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
478 ha->optrom_region_start, ha->optrom_region_size);
479 break;
480 default:
481 rval = -EINVAL;
484 out:
485 mutex_unlock(&ha->optrom_mutex);
486 return rval;
489 static struct bin_attribute sysfs_optrom_ctl_attr = {
490 .attr = {
491 .name = "optrom_ctl",
492 .mode = S_IWUSR,
494 .size = 0,
495 .write = qla2x00_sysfs_write_optrom_ctl,
498 static ssize_t
499 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
500 struct bin_attribute *bin_attr,
501 char *buf, loff_t off, size_t count)
503 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
504 struct device, kobj)));
505 struct qla_hw_data *ha = vha->hw;
506 uint32_t faddr;
508 if (unlikely(pci_channel_offline(ha->pdev)))
509 return -EAGAIN;
511 if (!capable(CAP_SYS_ADMIN))
512 return -EINVAL;
514 if (IS_NOCACHE_VPD_TYPE(ha)) {
515 faddr = ha->flt_region_vpd << 2;
517 if (IS_QLA27XX(ha) &&
518 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
519 faddr = ha->flt_region_vpd_sec << 2;
521 mutex_lock(&ha->optrom_mutex);
522 if (qla2x00_chip_is_down(vha)) {
523 mutex_unlock(&ha->optrom_mutex);
524 return -EAGAIN;
526 ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
527 ha->vpd_size);
528 mutex_unlock(&ha->optrom_mutex);
530 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
533 static ssize_t
534 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
535 struct bin_attribute *bin_attr,
536 char *buf, loff_t off, size_t count)
538 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
539 struct device, kobj)));
540 struct qla_hw_data *ha = vha->hw;
541 uint8_t *tmp_data;
543 if (unlikely(pci_channel_offline(ha->pdev)))
544 return 0;
546 if (qla2x00_chip_is_down(vha))
547 return 0;
549 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
550 !ha->isp_ops->write_nvram)
551 return 0;
553 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
554 ql_log(ql_log_warn, vha, 0x706a,
555 "HBA not online, failing VPD update.\n");
556 return -EAGAIN;
559 mutex_lock(&ha->optrom_mutex);
560 if (qla2x00_chip_is_down(vha)) {
561 mutex_unlock(&ha->optrom_mutex);
562 return -EAGAIN;
565 /* Write NVRAM. */
566 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
567 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
569 /* Update flash version information for 4Gb & above. */
570 if (!IS_FWI2_CAPABLE(ha)) {
571 mutex_unlock(&ha->optrom_mutex);
572 return -EINVAL;
575 tmp_data = vmalloc(256);
576 if (!tmp_data) {
577 mutex_unlock(&ha->optrom_mutex);
578 ql_log(ql_log_warn, vha, 0x706b,
579 "Unable to allocate memory for VPD information update.\n");
580 return -ENOMEM;
582 ha->isp_ops->get_flash_version(vha, tmp_data);
583 vfree(tmp_data);
585 mutex_unlock(&ha->optrom_mutex);
587 return count;
590 static struct bin_attribute sysfs_vpd_attr = {
591 .attr = {
592 .name = "vpd",
593 .mode = S_IRUSR | S_IWUSR,
595 .size = 0,
596 .read = qla2x00_sysfs_read_vpd,
597 .write = qla2x00_sysfs_write_vpd,
600 static ssize_t
601 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
602 struct bin_attribute *bin_attr,
603 char *buf, loff_t off, size_t count)
605 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
606 struct device, kobj)));
607 int rval;
609 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
610 return 0;
612 mutex_lock(&vha->hw->optrom_mutex);
613 if (qla2x00_chip_is_down(vha)) {
614 mutex_unlock(&vha->hw->optrom_mutex);
615 return 0;
618 rval = qla2x00_read_sfp_dev(vha, buf, count);
619 mutex_unlock(&vha->hw->optrom_mutex);
621 if (rval)
622 return -EIO;
624 return count;
627 static struct bin_attribute sysfs_sfp_attr = {
628 .attr = {
629 .name = "sfp",
630 .mode = S_IRUSR | S_IWUSR,
632 .size = SFP_DEV_SIZE,
633 .read = qla2x00_sysfs_read_sfp,
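/*
 * Magic values accepted by the 'reset' attribute:
 *   0x2025c - ISP reset
 *   0x2025d - MPI reset (QLA81xx/QLA83xx only)
 *   0x2025e - FCoE context reset (P3P base port only)
 *   0x2025f / 0x20260 - disable / enable reset via IDC control (QLA8031 only)
 *   0x20261 - refresh the cached flash version info without a reset
 */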
636 static ssize_t
637 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
638 struct bin_attribute *bin_attr,
639 char *buf, loff_t off, size_t count)
641 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
642 struct device, kobj)));
643 struct qla_hw_data *ha = vha->hw;
644 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
645 int type;
646 uint32_t idc_control;
647 uint8_t *tmp_data = NULL;
648 if (off != 0)
649 return -EINVAL;
651 type = simple_strtol(buf, NULL, 10);
652 switch (type) {
653 case 0x2025c:
654 ql_log(ql_log_info, vha, 0x706e,
655 "Issuing ISP reset.\n");
657 scsi_block_requests(vha->host);
658 if (IS_QLA82XX(ha)) {
659 ha->flags.isp82xx_no_md_cap = 1;
660 qla82xx_idc_lock(ha);
661 qla82xx_set_reset_owner(vha);
662 qla82xx_idc_unlock(ha);
663 } else if (IS_QLA8044(ha)) {
664 qla8044_idc_lock(ha);
665 idc_control = qla8044_rd_reg(ha,
666 QLA8044_IDC_DRV_CTRL);
667 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
668 (idc_control | GRACEFUL_RESET_BIT1));
669 qla82xx_set_reset_owner(vha);
670 qla8044_idc_unlock(ha);
671 } else {
672 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
673 qla2xxx_wake_dpc(vha);
675 qla2x00_wait_for_chip_reset(vha);
676 scsi_unblock_requests(vha->host);
677 break;
678 case 0x2025d:
679 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
680 return -EPERM;
682 ql_log(ql_log_info, vha, 0x706f,
683 "Issuing MPI reset.\n");
685 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
686 uint32_t idc_control;
688 qla83xx_idc_lock(vha, 0);
689 __qla83xx_get_idc_control(vha, &idc_control);
690 idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
691 __qla83xx_set_idc_control(vha, idc_control);
692 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
693 QLA8XXX_DEV_NEED_RESET);
694 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
695 qla83xx_idc_unlock(vha, 0);
696 break;
697 } else {
698 /* Make sure FC side is not in reset */
699 qla2x00_wait_for_hba_online(vha);
701 /* Issue MPI reset */
702 scsi_block_requests(vha->host);
703 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
704 ql_log(ql_log_warn, vha, 0x7070,
705 "MPI reset failed.\n");
706 scsi_unblock_requests(vha->host);
707 break;
709 case 0x2025e:
710 if (!IS_P3P_TYPE(ha) || vha != base_vha) {
711 ql_log(ql_log_info, vha, 0x7071,
712 "FCoE ctx reset not supported.\n");
713 return -EPERM;
716 ql_log(ql_log_info, vha, 0x7072,
717 "Issuing FCoE ctx reset.\n");
718 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
719 qla2xxx_wake_dpc(vha);
720 qla2x00_wait_for_fcoe_ctx_reset(vha);
721 break;
722 case 0x2025f:
723 if (!IS_QLA8031(ha))
724 return -EPERM;
725 ql_log(ql_log_info, vha, 0x70bc,
726 "Disabling Reset by IDC control\n");
727 qla83xx_idc_lock(vha, 0);
728 __qla83xx_get_idc_control(vha, &idc_control);
729 idc_control |= QLA83XX_IDC_RESET_DISABLED;
730 __qla83xx_set_idc_control(vha, idc_control);
731 qla83xx_idc_unlock(vha, 0);
732 break;
733 case 0x20260:
734 if (!IS_QLA8031(ha))
735 return -EPERM;
736 ql_log(ql_log_info, vha, 0x70bd,
737 "Enabling Reset by IDC control\n");
738 qla83xx_idc_lock(vha, 0);
739 __qla83xx_get_idc_control(vha, &idc_control);
740 idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
741 __qla83xx_set_idc_control(vha, idc_control);
742 qla83xx_idc_unlock(vha, 0);
743 break;
744 case 0x20261:
745 ql_dbg(ql_dbg_user, vha, 0x70e0,
746 "Updating cache versions without reset ");
748 tmp_data = vmalloc(256);
749 if (!tmp_data) {
750 ql_log(ql_log_warn, vha, 0x70e1,
751 "Unable to allocate memory for VPD information update.\n");
752 return -ENOMEM;
754 ha->isp_ops->get_flash_version(vha, tmp_data);
755 vfree(tmp_data);
756 break;
758 return count;
761 static struct bin_attribute sysfs_reset_attr = {
762 .attr = {
763 .name = "reset",
764 .mode = S_IWUSR,
766 .size = 0,
767 .write = qla2x00_sysfs_write_reset,
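/*
 * The value written to 'issue_logo' encodes the destination 24-bit FC
 * port ID: domain in bits 23:16, area in bits 15:8, al_pa in bits 7:0.
 * An explicit LOGO ELS is then sent to that port via qla24xx_els_dcmd_iocb().
 */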
770 static ssize_t
771 qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
772 struct bin_attribute *bin_attr,
773 char *buf, loff_t off, size_t count)
775 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
776 struct device, kobj)));
777 int type;
778 port_id_t did;
780 if (!capable(CAP_SYS_ADMIN))
781 return 0;
783 if (unlikely(pci_channel_offline(vha->hw->pdev)))
784 return 0;
786 if (qla2x00_chip_is_down(vha))
787 return 0;
789 type = simple_strtol(buf, NULL, 10);
791 did.b.domain = (type & 0x00ff0000) >> 16;
792 did.b.area = (type & 0x0000ff00) >> 8;
793 did.b.al_pa = (type & 0x000000ff);
795 ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
796 did.b.domain, did.b.area, did.b.al_pa);
798 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
800 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
801 return count;
804 static struct bin_attribute sysfs_issue_logo_attr = {
805 .attr = {
806 .name = "issue_logo",
807 .mode = S_IWUSR,
809 .size = 0,
810 .write = qla2x00_issue_logo,
813 static ssize_t
814 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
815 struct bin_attribute *bin_attr,
816 char *buf, loff_t off, size_t count)
818 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
819 struct device, kobj)));
820 struct qla_hw_data *ha = vha->hw;
821 int rval;
822 uint16_t actual_size;
824 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
825 return 0;
827 if (unlikely(pci_channel_offline(ha->pdev)))
828 return 0;
829 mutex_lock(&vha->hw->optrom_mutex);
830 if (qla2x00_chip_is_down(vha)) {
831 mutex_unlock(&vha->hw->optrom_mutex);
832 return 0;
835 if (ha->xgmac_data)
836 goto do_read;
838 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
839 &ha->xgmac_data_dma, GFP_KERNEL);
840 if (!ha->xgmac_data) {
841 mutex_unlock(&vha->hw->optrom_mutex);
842 ql_log(ql_log_warn, vha, 0x7076,
843 "Unable to allocate memory for XGMAC read-data.\n");
844 return 0;
847 do_read:
848 actual_size = 0;
849 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
851 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
852 XGMAC_DATA_SIZE, &actual_size);
854 mutex_unlock(&vha->hw->optrom_mutex);
855 if (rval != QLA_SUCCESS) {
856 ql_log(ql_log_warn, vha, 0x7077,
857 "Unable to read XGMAC data (%x).\n", rval);
858 count = 0;
861 count = actual_size > count ? count: actual_size;
862 memcpy(buf, ha->xgmac_data, count);
864 return count;
867 static struct bin_attribute sysfs_xgmac_stats_attr = {
868 .attr = {
869 .name = "xgmac_stats",
870 .mode = S_IRUSR,
872 .size = 0,
873 .read = qla2x00_sysfs_read_xgmac_stats,
876 static ssize_t
877 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
878 struct bin_attribute *bin_attr,
879 char *buf, loff_t off, size_t count)
881 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
882 struct device, kobj)));
883 struct qla_hw_data *ha = vha->hw;
884 int rval;
886 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
887 return 0;
889 if (ha->dcbx_tlv)
890 goto do_read;
891 mutex_lock(&vha->hw->optrom_mutex);
892 if (qla2x00_chip_is_down(vha)) {
893 mutex_unlock(&vha->hw->optrom_mutex);
894 return 0;
897 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
898 &ha->dcbx_tlv_dma, GFP_KERNEL);
899 if (!ha->dcbx_tlv) {
900 mutex_unlock(&vha->hw->optrom_mutex);
901 ql_log(ql_log_warn, vha, 0x7078,
902 "Unable to allocate memory for DCBX TLV read-data.\n");
903 return -ENOMEM;
906 do_read:
907 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
909 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
910 DCBX_TLV_DATA_SIZE);
912 mutex_unlock(&vha->hw->optrom_mutex);
914 if (rval != QLA_SUCCESS) {
915 ql_log(ql_log_warn, vha, 0x7079,
916 "Unable to read DCBX TLV (%x).\n", rval);
917 return -EIO;
920 memcpy(buf, ha->dcbx_tlv, count);
922 return count;
925 static struct bin_attribute sysfs_dcbx_tlv_attr = {
926 .attr = {
927 .name = "dcbx_tlv",
928 .mode = S_IRUSR,
930 .size = 0,
931 .read = qla2x00_sysfs_read_dcbx_tlv,
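/*
 * is4GBp_only gates attribute creation in qla2x00_alloc_sysfs_attr():
 * 1 requires an FWI-2 (4Gb+) capable ISP, 2 requires a QLA25xx,
 * 3 requires a CNA-capable adapter; 0 means the file is always created.
 */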
934 static struct sysfs_entry {
935 char *name;
936 struct bin_attribute *attr;
937 int is4GBp_only;
938 } bin_file_entries[] = {
939 { "fw_dump", &sysfs_fw_dump_attr, },
940 { "nvram", &sysfs_nvram_attr, },
941 { "optrom", &sysfs_optrom_attr, },
942 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
943 { "vpd", &sysfs_vpd_attr, 1 },
944 { "sfp", &sysfs_sfp_attr, 1 },
945 { "reset", &sysfs_reset_attr, },
946 { "issue_logo", &sysfs_issue_logo_attr, },
947 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
948 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
949 { NULL },
952 void
953 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
955 struct Scsi_Host *host = vha->host;
956 struct sysfs_entry *iter;
957 int ret;
959 for (iter = bin_file_entries; iter->name; iter++) {
960 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
961 continue;
962 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
963 continue;
964 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
965 continue;
967 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
968 iter->attr);
969 if (ret)
970 ql_log(ql_log_warn, vha, 0x00f3,
971 "Unable to create sysfs %s binary attribute (%d).\n",
972 iter->name, ret);
973 else
974 ql_dbg(ql_dbg_init, vha, 0x00f4,
975 "Successfully created sysfs %s binary attribute.\n",
976 iter->name);
980 void
981 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
983 struct Scsi_Host *host = vha->host;
984 struct sysfs_entry *iter;
985 struct qla_hw_data *ha = vha->hw;
987 for (iter = bin_file_entries; iter->name; iter++) {
988 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
989 continue;
990 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
991 continue;
992 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
993 continue;
994 if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
995 continue;
997 sysfs_remove_bin_file(&host->shost_gendev.kobj,
998 iter->attr);
1001 if (stop_beacon && ha->beacon_blink_led == 1)
1002 ha->isp_ops->beacon_off(vha);
1005 /* Scsi_Host attributes. */
1007 static ssize_t
1008 qla2x00_driver_version_show(struct device *dev,
1009 struct device_attribute *attr, char *buf)
1011 return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
1014 static ssize_t
1015 qla2x00_fw_version_show(struct device *dev,
1016 struct device_attribute *attr, char *buf)
1018 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1019 struct qla_hw_data *ha = vha->hw;
1020 char fw_str[128];
1022 return scnprintf(buf, PAGE_SIZE, "%s\n",
1023 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1026 static ssize_t
1027 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1028 char *buf)
1030 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1031 struct qla_hw_data *ha = vha->hw;
1032 uint32_t sn;
1034 if (IS_QLAFX00(vha->hw)) {
1035 return scnprintf(buf, PAGE_SIZE, "%s\n",
1036 vha->hw->mr.serial_num);
1037 } else if (IS_FWI2_CAPABLE(ha)) {
1038 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1039 return strlen(strcat(buf, "\n"));
1042 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1043 return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1044 sn % 100000);
1047 static ssize_t
1048 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1049 char *buf)
1051 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1052 return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1055 static ssize_t
1056 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1057 char *buf)
1059 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1060 struct qla_hw_data *ha = vha->hw;
1062 if (IS_QLAFX00(vha->hw))
1063 return scnprintf(buf, PAGE_SIZE, "%s\n",
1064 vha->hw->mr.hw_version);
1066 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1067 ha->product_id[0], ha->product_id[1], ha->product_id[2],
1068 ha->product_id[3]);
1071 static ssize_t
1072 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1073 char *buf)
1075 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1077 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1080 static ssize_t
1081 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1082 char *buf)
1084 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1085 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1088 static ssize_t
1089 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1090 char *buf)
1092 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1093 char pci_info[30];
1095 return scnprintf(buf, PAGE_SIZE, "%s\n",
1096 vha->hw->isp_ops->pci_info_str(vha, pci_info));
1099 static ssize_t
1100 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1101 char *buf)
1103 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1104 struct qla_hw_data *ha = vha->hw;
1105 int len = 0;
1107 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1108 atomic_read(&vha->loop_state) == LOOP_DEAD ||
1109 vha->device_flags & DFLG_NO_CABLE)
1110 len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1111 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1112 qla2x00_chip_is_down(vha))
1113 len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1114 else {
1115 len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1117 switch (ha->current_topology) {
1118 case ISP_CFG_NL:
1119 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1120 break;
1121 case ISP_CFG_FL:
1122 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1123 break;
1124 case ISP_CFG_N:
1125 len += scnprintf(buf + len, PAGE_SIZE-len,
1126 "N_Port to N_Port\n");
1127 break;
1128 case ISP_CFG_F:
1129 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1130 break;
1131 default:
1132 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1133 break;
1136 return len;
1139 static ssize_t
1140 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1141 char *buf)
1143 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1144 int len = 0;
1146 switch (vha->hw->zio_mode) {
1147 case QLA_ZIO_MODE_6:
1148 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1149 break;
1150 case QLA_ZIO_DISABLED:
1151 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1152 break;
1154 return len;
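/*
 * Writing a non-zero value to 'zio' selects ZIO mode 6, zero disables it.
 * Unless both the current and requested modes are disabled, ha->zio_mode is
 * updated and an ISP abort is queued so the firmware picks up the change.
 */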
1157 static ssize_t
1158 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1159 const char *buf, size_t count)
1161 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1162 struct qla_hw_data *ha = vha->hw;
1163 int val = 0;
1164 uint16_t zio_mode;
1166 if (!IS_ZIO_SUPPORTED(ha))
1167 return -ENOTSUPP;
1169 if (sscanf(buf, "%d", &val) != 1)
1170 return -EINVAL;
1172 if (val)
1173 zio_mode = QLA_ZIO_MODE_6;
1174 else
1175 zio_mode = QLA_ZIO_DISABLED;
1177 /* Update per-hba values and queue a reset. */
1178 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1179 ha->zio_mode = zio_mode;
1180 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1182 return strlen(buf);
1185 static ssize_t
1186 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1187 char *buf)
1189 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1191 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1194 static ssize_t
1195 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1196 const char *buf, size_t count)
1198 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1199 int val = 0;
1200 uint16_t zio_timer;
1202 if (sscanf(buf, "%d", &val) != 1)
1203 return -EINVAL;
1204 if (val > 25500 || val < 100)
1205 return -ERANGE;
1207 zio_timer = (uint16_t)(val / 100);
1208 vha->hw->zio_timer = zio_timer;
1210 return strlen(buf);
1213 static ssize_t
1214 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1215 char *buf)
1217 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1219 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1220 vha->hw->last_zio_threshold);
1223 static ssize_t
1224 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1225 const char *buf, size_t count)
1227 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1228 int val = 0;
1230 if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1231 return -EINVAL;
1232 if (sscanf(buf, "%d", &val) != 1)
1233 return -EINVAL;
1234 if (val < 0 || val > 256)
1235 return -ERANGE;
1237 atomic_set(&vha->hw->zio_threshold, val);
1238 return strlen(buf);
1241 static ssize_t
1242 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1243 char *buf)
1245 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1246 int len = 0;
1248 if (vha->hw->beacon_blink_led)
1249 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1250 else
1251 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1252 return len;
1255 static ssize_t
1256 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1257 const char *buf, size_t count)
1259 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1260 struct qla_hw_data *ha = vha->hw;
1261 int val = 0;
1262 int rval;
1264 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1265 return -EPERM;
1267 if (sscanf(buf, "%d", &val) != 1)
1268 return -EINVAL;
1270 mutex_lock(&vha->hw->optrom_mutex);
1271 if (qla2x00_chip_is_down(vha)) {
1272 mutex_unlock(&vha->hw->optrom_mutex);
1273 ql_log(ql_log_warn, vha, 0x707a,
1274 "Abort ISP active -- ignoring beacon request.\n");
1275 return -EBUSY;
1278 if (val)
1279 rval = ha->isp_ops->beacon_on(vha);
1280 else
1281 rval = ha->isp_ops->beacon_off(vha);
1283 if (rval != QLA_SUCCESS)
1284 count = 0;
1286 mutex_unlock(&vha->hw->optrom_mutex);
1288 return count;
1291 static ssize_t
1292 qla2x00_optrom_bios_version_show(struct device *dev,
1293 struct device_attribute *attr, char *buf)
1295 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1296 struct qla_hw_data *ha = vha->hw;
1297 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1298 ha->bios_revision[0]);
1301 static ssize_t
1302 qla2x00_optrom_efi_version_show(struct device *dev,
1303 struct device_attribute *attr, char *buf)
1305 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1306 struct qla_hw_data *ha = vha->hw;
1307 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1308 ha->efi_revision[0]);
1311 static ssize_t
1312 qla2x00_optrom_fcode_version_show(struct device *dev,
1313 struct device_attribute *attr, char *buf)
1315 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1316 struct qla_hw_data *ha = vha->hw;
1317 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1318 ha->fcode_revision[0]);
1321 static ssize_t
1322 qla2x00_optrom_fw_version_show(struct device *dev,
1323 struct device_attribute *attr, char *buf)
1325 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1326 struct qla_hw_data *ha = vha->hw;
1327 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1328 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1329 ha->fw_revision[3]);
1332 static ssize_t
1333 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1334 struct device_attribute *attr, char *buf)
1336 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1337 struct qla_hw_data *ha = vha->hw;
1339 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1340 return scnprintf(buf, PAGE_SIZE, "\n");
1342 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1343 ha->gold_fw_version[0], ha->gold_fw_version[1],
1344 ha->gold_fw_version[2], ha->gold_fw_version[3]);
1347 static ssize_t
1348 qla2x00_total_isp_aborts_show(struct device *dev,
1349 struct device_attribute *attr, char *buf)
1351 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1352 return scnprintf(buf, PAGE_SIZE, "%d\n",
1353 vha->qla_stats.total_isp_aborts);
1356 static ssize_t
1357 qla24xx_84xx_fw_version_show(struct device *dev,
1358 struct device_attribute *attr, char *buf)
1360 int rval = QLA_SUCCESS;
1361 uint16_t status[2] = {0, 0};
1362 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1363 struct qla_hw_data *ha = vha->hw;
1365 if (!IS_QLA84XX(ha))
1366 return scnprintf(buf, PAGE_SIZE, "\n");
1368 if (ha->cs84xx->op_fw_version == 0)
1369 rval = qla84xx_verify_chip(vha, status);
1371 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1372 return scnprintf(buf, PAGE_SIZE, "%u\n",
1373 (uint32_t)ha->cs84xx->op_fw_version);
1375 return scnprintf(buf, PAGE_SIZE, "\n");
1378 static ssize_t
1379 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1380 char *buf)
1382 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1383 struct qla_hw_data *ha = vha->hw;
1385 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1386 !IS_QLA27XX(ha))
1387 return scnprintf(buf, PAGE_SIZE, "\n");
1389 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1390 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1391 ha->mpi_capabilities);
1394 static ssize_t
1395 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1396 char *buf)
1398 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1399 struct qla_hw_data *ha = vha->hw;
1401 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1402 return scnprintf(buf, PAGE_SIZE, "\n");
1404 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1405 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1408 static ssize_t
1409 qla2x00_flash_block_size_show(struct device *dev,
1410 struct device_attribute *attr, char *buf)
1412 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1413 struct qla_hw_data *ha = vha->hw;
1415 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1418 static ssize_t
1419 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1420 char *buf)
1422 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1424 if (!IS_CNA_CAPABLE(vha->hw))
1425 return scnprintf(buf, PAGE_SIZE, "\n");
1427 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1430 static ssize_t
1431 qla2x00_vn_port_mac_address_show(struct device *dev,
1432 struct device_attribute *attr, char *buf)
1434 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1436 if (!IS_CNA_CAPABLE(vha->hw))
1437 return scnprintf(buf, PAGE_SIZE, "\n");
1439 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1442 static ssize_t
1443 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1444 char *buf)
1446 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1448 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1451 static ssize_t
1452 qla2x00_thermal_temp_show(struct device *dev,
1453 struct device_attribute *attr, char *buf)
1455 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1456 uint16_t temp = 0;
1457 int rc;
1459 mutex_lock(&vha->hw->optrom_mutex);
1460 if (qla2x00_chip_is_down(vha)) {
1461 mutex_unlock(&vha->hw->optrom_mutex);
1462 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1463 goto done;
1466 if (vha->hw->flags.eeh_busy) {
1467 mutex_unlock(&vha->hw->optrom_mutex);
1468 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1469 goto done;
1472 rc = qla2x00_get_thermal_temp(vha, &temp);
1473 mutex_unlock(&vha->hw->optrom_mutex);
1474 if (rc == QLA_SUCCESS)
1475 return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1477 done:
1478 return scnprintf(buf, PAGE_SIZE, "\n");
1481 static ssize_t
1482 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1483 char *buf)
1485 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1486 int rval = QLA_FUNCTION_FAILED;
1487 uint16_t state[6];
1488 uint32_t pstate;
1490 if (IS_QLAFX00(vha->hw)) {
1491 pstate = qlafx00_fw_state_show(dev, attr, buf);
1492 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1495 mutex_lock(&vha->hw->optrom_mutex);
1496 if (qla2x00_chip_is_down(vha)) {
1497 mutex_unlock(&vha->hw->optrom_mutex);
1498 ql_log(ql_log_warn, vha, 0x707c,
1499 "ISP reset active.\n");
1500 goto out;
1501 } else if (vha->hw->flags.eeh_busy) {
1502 mutex_unlock(&vha->hw->optrom_mutex);
1503 goto out;
1506 rval = qla2x00_get_firmware_state(vha, state);
1507 mutex_unlock(&vha->hw->optrom_mutex);
1508 out:
1509 if (rval != QLA_SUCCESS) {
1510 memset(state, -1, sizeof(state));
1511 rval = qla2x00_get_firmware_state(vha, state);
1514 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1515 state[0], state[1], state[2], state[3], state[4], state[5]);
1518 static ssize_t
1519 qla2x00_diag_requests_show(struct device *dev,
1520 struct device_attribute *attr, char *buf)
1522 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1524 if (!IS_BIDI_CAPABLE(vha->hw))
1525 return scnprintf(buf, PAGE_SIZE, "\n");
1527 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1530 static ssize_t
1531 qla2x00_diag_megabytes_show(struct device *dev,
1532 struct device_attribute *attr, char *buf)
1534 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1536 if (!IS_BIDI_CAPABLE(vha->hw))
1537 return scnprintf(buf, PAGE_SIZE, "\n");
1539 return scnprintf(buf, PAGE_SIZE, "%llu\n",
1540 vha->bidi_stats.transfer_bytes >> 20);
1543 static ssize_t
1544 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1545 char *buf)
1547 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1548 struct qla_hw_data *ha = vha->hw;
1549 uint32_t size;
1551 if (!ha->fw_dumped)
1552 size = 0;
1553 else if (IS_P3P_TYPE(ha))
1554 size = ha->md_template_size + ha->md_dump_size;
1555 else
1556 size = ha->fw_dump_len;
1558 return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1561 static ssize_t
1562 qla2x00_allow_cna_fw_dump_show(struct device *dev,
1563 struct device_attribute *attr, char *buf)
1565 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1567 if (!IS_P3P_TYPE(vha->hw))
1568 return scnprintf(buf, PAGE_SIZE, "\n");
1569 else
1570 return scnprintf(buf, PAGE_SIZE, "%s\n",
1571 vha->hw->allow_cna_fw_dump ? "true" : "false");
1574 static ssize_t
1575 qla2x00_allow_cna_fw_dump_store(struct device *dev,
1576 struct device_attribute *attr, const char *buf, size_t count)
1578 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1579 int val = 0;
1581 if (!IS_P3P_TYPE(vha->hw))
1582 return -EINVAL;
1584 if (sscanf(buf, "%d", &val) != 1)
1585 return -EINVAL;
1587 vha->hw->allow_cna_fw_dump = val != 0;
1589 return strlen(buf);
1592 static ssize_t
1593 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1594 char *buf)
1596 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1597 struct qla_hw_data *ha = vha->hw;
1599 if (!IS_QLA27XX(ha))
1600 return scnprintf(buf, PAGE_SIZE, "\n");
1602 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1603 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
1606 static ssize_t
1607 qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr,
1608 char *buf)
1610 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1611 struct qla_hw_data *ha = vha->hw;
1613 if (!IS_QLA27XX(ha))
1614 return scnprintf(buf, PAGE_SIZE, "\n");
1616 return scnprintf(buf, PAGE_SIZE, "%s\n",
1617 ha->min_link_speed == 5 ? "32Gps" :
1618 ha->min_link_speed == 4 ? "16Gps" :
1619 ha->min_link_speed == 3 ? "8Gps" :
1620 ha->min_link_speed == 2 ? "4Gps" :
1621 ha->min_link_speed != 0 ? "unknown" : "");
1624 static ssize_t
1625 qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
1626 char *buf)
1628 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1629 struct qla_hw_data *ha = vha->hw;
1631 if (!IS_QLA27XX(ha))
1632 return scnprintf(buf, PAGE_SIZE, "\n");
1634 return scnprintf(buf, PAGE_SIZE, "%s\n",
1635 ha->max_speed_sup ? "32Gps" : "16Gps");
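/*
 * 'port_speed' (ISP27xx only) accepts 0 (autonegotiate) or 4/8/16/32 to
 * force that link rate in Gbps.  Values 40/80/160/320 select the same
 * rates but use QLA_SET_DATA_RATE_NOLR; per the log message below, the
 * change then takes effect only after the next loss of sync.
 */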
1638 static ssize_t
1639 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1640 const char *buf, size_t count)
1642 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1643 ulong type, speed;
1644 int oldspeed, rval;
1645 int mode = QLA_SET_DATA_RATE_LR;
1646 struct qla_hw_data *ha = vha->hw;
1648 if (!IS_QLA27XX(vha->hw)) {
1649 ql_log(ql_log_warn, vha, 0x70d8,
1650 "Speed setting not supported \n");
1651 return -EINVAL;
1654 rval = kstrtol(buf, 10, &type);
1655 if (rval)
1656 return rval;
1657 speed = type;
1658 if (type == 40 || type == 80 || type == 160 ||
1659 type == 320) {
1660 ql_dbg(ql_dbg_user, vha, 0x70d9,
1661 "Setting will be affected after a loss of sync\n");
1662 type = type/10;
1663 mode = QLA_SET_DATA_RATE_NOLR;
1666 oldspeed = ha->set_data_rate;
1668 switch (type) {
1669 case 0:
1670 ha->set_data_rate = PORT_SPEED_AUTO;
1671 break;
1672 case 4:
1673 ha->set_data_rate = PORT_SPEED_4GB;
1674 break;
1675 case 8:
1676 ha->set_data_rate = PORT_SPEED_8GB;
1677 break;
1678 case 16:
1679 ha->set_data_rate = PORT_SPEED_16GB;
1680 break;
1681 case 32:
1682 ha->set_data_rate = PORT_SPEED_32GB;
1683 break;
1684 default:
1685 ql_log(ql_log_warn, vha, 0x1199,
1686 "Unrecognized speed setting:%lx. Setting Autoneg\n",
1687 speed);
1688 ha->set_data_rate = PORT_SPEED_AUTO;
1691 if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1692 return -EINVAL;
1694 ql_log(ql_log_info, vha, 0x70da,
1695 "Setting speed to %lx Gbps \n", type);
1697 rval = qla2x00_set_data_rate(vha, mode);
1698 if (rval != QLA_SUCCESS)
1699 return -EIO;
1701 return strlen(buf);
1704 static ssize_t
1705 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1706 char *buf)
1708 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1709 struct qla_hw_data *ha = vha->hw;
1710 ssize_t rval;
1711 char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
1713 rval = qla2x00_get_data_rate(vha);
1714 if (rval != QLA_SUCCESS) {
1715 ql_log(ql_log_warn, vha, 0x70db,
1716 "Unable to get port speed rval:%zd\n", rval);
1717 return -EINVAL;
1720 ql_log(ql_log_info, vha, 0x70d6,
1721 "port speed:%d\n", ha->link_data_rate);
1723 return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
1726 /* ----- */
1728 static ssize_t
1729 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1731 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1732 int len = 0;
1734 len += scnprintf(buf + len, PAGE_SIZE-len,
1735 "Supported options: enabled | disabled | dual | exclusive\n");
1737 /* --- */
1738 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1740 switch (vha->qlini_mode) {
1741 case QLA2XXX_INI_MODE_EXCLUSIVE:
1742 len += scnprintf(buf + len, PAGE_SIZE-len,
1743 QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1744 break;
1745 case QLA2XXX_INI_MODE_DISABLED:
1746 len += scnprintf(buf + len, PAGE_SIZE-len,
1747 QLA2XXX_INI_MODE_STR_DISABLED);
1748 break;
1749 case QLA2XXX_INI_MODE_ENABLED:
1750 len += scnprintf(buf + len, PAGE_SIZE-len,
1751 QLA2XXX_INI_MODE_STR_ENABLED);
1752 break;
1753 case QLA2XXX_INI_MODE_DUAL:
1754 len += scnprintf(buf + len, PAGE_SIZE-len,
1755 QLA2XXX_INI_MODE_STR_DUAL);
1756 break;
1758 len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1760 return len;
1763 static char *mode_to_str[] = {
1764 "exclusive",
1765 "disabled",
1766 "enabled",
1767 "dual",
1770 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
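/*
 * qla_set_ini_mode() compares the requested initiator mode ('op') against
 * the current vha->qlini_mode and the user-requested exchange counts and
 * resolves to one of: MODE_CHANGE_ACCEPT (apply new mode/exchange counts
 * and schedule an ISP abort), MODE_CHANGE_NO_ACTION (record the new values
 * without a reset), TARGET_STILL_ACTIVE (refused while target mode runs),
 * or NO_ACTION.  NEED_EXCH_OFFLOAD() is true when the requested exchange
 * count exceeds the firmware default (FW_DEF_EXCHANGES_CNT).
 */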
1771 static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1773 int rc = 0;
1774 enum {
1775 NO_ACTION,
1776 MODE_CHANGE_ACCEPT,
1777 MODE_CHANGE_NO_ACTION,
1778 TARGET_STILL_ACTIVE,
1780 int action = NO_ACTION;
1781 int set_mode = 0;
1782 u8 eo_toggle = 0; /* exchange offload flipped */
1784 switch (vha->qlini_mode) {
1785 case QLA2XXX_INI_MODE_DISABLED:
1786 switch (op) {
1787 case QLA2XXX_INI_MODE_DISABLED:
1788 if (qla_tgt_mode_enabled(vha)) {
1789 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1790 vha->hw->flags.exchoffld_enabled)
1791 eo_toggle = 1;
1792 if (((vha->ql2xexchoffld !=
1793 vha->u_ql2xexchoffld) &&
1794 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1795 eo_toggle) {
1796 /*
1797 * The number of exchange to be offload
1798 * was tweaked or offload option was
1799 * flipped
1800 */
1801 action = MODE_CHANGE_ACCEPT;
1802 } else {
1803 action = MODE_CHANGE_NO_ACTION;
1805 } else {
1806 action = MODE_CHANGE_NO_ACTION;
1808 break;
1809 case QLA2XXX_INI_MODE_EXCLUSIVE:
1810 if (qla_tgt_mode_enabled(vha)) {
1811 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1812 vha->hw->flags.exchoffld_enabled)
1813 eo_toggle = 1;
1814 if (((vha->ql2xexchoffld !=
1815 vha->u_ql2xexchoffld) &&
1816 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1817 eo_toggle) {
1818 /*
1819 * The number of exchange to be offload
1820 * was tweaked or offload option was
1821 * flipped
1822 */
1823 action = MODE_CHANGE_ACCEPT;
1824 } else {
1825 action = MODE_CHANGE_NO_ACTION;
1827 } else {
1828 action = MODE_CHANGE_ACCEPT;
1830 break;
1831 case QLA2XXX_INI_MODE_DUAL:
1832 action = MODE_CHANGE_ACCEPT;
1833 /* active_mode is target only, reset it to dual */
1834 if (qla_tgt_mode_enabled(vha)) {
1835 set_mode = 1;
1836 action = MODE_CHANGE_ACCEPT;
1837 } else {
1838 action = MODE_CHANGE_NO_ACTION;
1840 break;
1842 case QLA2XXX_INI_MODE_ENABLED:
1843 if (qla_tgt_mode_enabled(vha))
1844 action = TARGET_STILL_ACTIVE;
1845 else {
1846 action = MODE_CHANGE_ACCEPT;
1847 set_mode = 1;
1849 break;
1851 break;
1853 case QLA2XXX_INI_MODE_EXCLUSIVE:
1854 switch (op) {
1855 case QLA2XXX_INI_MODE_EXCLUSIVE:
1856 if (qla_tgt_mode_enabled(vha)) {
1857 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1858 vha->hw->flags.exchoffld_enabled)
1859 eo_toggle = 1;
1860 if (((vha->ql2xexchoffld !=
1861 vha->u_ql2xexchoffld) &&
1862 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1863 eo_toggle)
1864 /*
1865 * The number of exchange to be offload
1866 * was tweaked or offload option was
1867 * flipped
1868 */
1869 action = MODE_CHANGE_ACCEPT;
1870 else
1871 action = NO_ACTION;
1872 } else
1873 action = NO_ACTION;
1875 break;
1877 case QLA2XXX_INI_MODE_DISABLED:
1878 if (qla_tgt_mode_enabled(vha)) {
1879 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1880 vha->hw->flags.exchoffld_enabled)
1881 eo_toggle = 1;
1882 if (((vha->ql2xexchoffld !=
1883 vha->u_ql2xexchoffld) &&
1884 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1885 eo_toggle)
1886 action = MODE_CHANGE_ACCEPT;
1887 else
1888 action = MODE_CHANGE_NO_ACTION;
1889 } else
1890 action = MODE_CHANGE_NO_ACTION;
1891 break;
1893 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
1894 if (qla_tgt_mode_enabled(vha)) {
1895 action = MODE_CHANGE_ACCEPT;
1896 set_mode = 1;
1897 } else
1898 action = MODE_CHANGE_ACCEPT;
1899 break;
1901 case QLA2XXX_INI_MODE_ENABLED:
1902 if (qla_tgt_mode_enabled(vha))
1903 action = TARGET_STILL_ACTIVE;
1904 else {
1905 if (vha->hw->flags.fw_started)
1906 action = MODE_CHANGE_NO_ACTION;
1907 else
1908 action = MODE_CHANGE_ACCEPT;
1910 break;
1912 break;
1914 case QLA2XXX_INI_MODE_ENABLED:
1915 switch (op) {
1916 case QLA2XXX_INI_MODE_ENABLED:
1917 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
1918 vha->hw->flags.exchoffld_enabled)
1919 eo_toggle = 1;
1920 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
1921 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
1922 eo_toggle)
1923 action = MODE_CHANGE_ACCEPT;
1924 else
1925 action = NO_ACTION;
1926 break;
1927 case QLA2XXX_INI_MODE_DUAL:
1928 case QLA2XXX_INI_MODE_DISABLED:
1929 action = MODE_CHANGE_ACCEPT;
1930 break;
1931 default:
1932 action = MODE_CHANGE_NO_ACTION;
1933 break;
1935 break;
1937 case QLA2XXX_INI_MODE_DUAL:
1938 switch (op) {
1939 case QLA2XXX_INI_MODE_DUAL:
1940 if (qla_tgt_mode_enabled(vha) ||
1941 qla_dual_mode_enabled(vha)) {
1942 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
1943 vha->u_ql2xiniexchg) !=
1944 vha->hw->flags.exchoffld_enabled)
1945 eo_toggle = 1;
1947 if ((((vha->ql2xexchoffld +
1948 vha->ql2xiniexchg) !=
1949 (vha->u_ql2xiniexchg +
1950 vha->u_ql2xexchoffld)) &&
1951 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
1952 vha->u_ql2xexchoffld)) || eo_toggle)
1953 action = MODE_CHANGE_ACCEPT;
1954 else
1955 action = NO_ACTION;
1956 } else {
1957 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
1958 vha->u_ql2xiniexchg) !=
1959 vha->hw->flags.exchoffld_enabled)
1960 eo_toggle = 1;
1962 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
1963 != (vha->u_ql2xiniexchg +
1964 vha->u_ql2xexchoffld)) &&
1965 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
1966 vha->u_ql2xexchoffld)) || eo_toggle)
1967 action = MODE_CHANGE_NO_ACTION;
1968 else
1969 action = NO_ACTION;
1971 break;
1973 case QLA2XXX_INI_MODE_DISABLED:
1974 if (qla_tgt_mode_enabled(vha) ||
1975 qla_dual_mode_enabled(vha)) {
1976 /* turning off initiator mode */
1977 set_mode = 1;
1978 action = MODE_CHANGE_ACCEPT;
1979 } else {
1980 action = MODE_CHANGE_NO_ACTION;
1982 break;
1984 case QLA2XXX_INI_MODE_EXCLUSIVE:
1985 if (qla_tgt_mode_enabled(vha) ||
1986 qla_dual_mode_enabled(vha)) {
1987 set_mode = 1;
1988 action = MODE_CHANGE_ACCEPT;
1989 } else {
1990 action = MODE_CHANGE_ACCEPT;
1992 break;
1994 case QLA2XXX_INI_MODE_ENABLED:
1995 if (qla_tgt_mode_enabled(vha) ||
1996 qla_dual_mode_enabled(vha)) {
1997 action = TARGET_STILL_ACTIVE;
1998 } else {
1999 action = MODE_CHANGE_ACCEPT;
2002 break;
2005 switch (action) {
2006 case MODE_CHANGE_ACCEPT:
2007 ql_log(ql_log_warn, vha, 0xffff,
2008 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2009 mode_to_str[vha->qlini_mode], mode_to_str[op],
2010 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2011 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2013 vha->qlini_mode = op;
2014 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2015 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2016 if (set_mode)
2017 qlt_set_mode(vha);
2018 vha->flags.online = 1;
2019 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2020 break;
2022 case MODE_CHANGE_NO_ACTION:
2023 ql_log(ql_log_warn, vha, 0xffff,
2024 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2025 mode_to_str[vha->qlini_mode], mode_to_str[op],
2026 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2027 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2028 vha->qlini_mode = op;
2029 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2030 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2031 break;
2033 case TARGET_STILL_ACTIVE:
2034 ql_log(ql_log_warn, vha, 0xffff,
2035 "Target Mode is active. Unable to change Mode.\n");
2036 break;
2038 case NO_ACTION:
2039 default:
2040 ql_log(ql_log_warn, vha, 0xffff,
2041 "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
2042 vha->qlini_mode, op,
2043 vha->ql2xexchoffld, vha->u_ql2xexchoffld);
2044 break;
2047 return rc;
2050 static ssize_t
2051 qlini_mode_store(struct device *dev, struct device_attribute *attr,
2052 const char *buf, size_t count)
2054 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2055 int ini;
2057 if (!buf)
2058 return -EINVAL;
2060 if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2061 strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2062 ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2063 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2064 strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2065 ini = QLA2XXX_INI_MODE_DISABLED;
2066 else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2067 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2068 ini = QLA2XXX_INI_MODE_ENABLED;
2069 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2070 strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2071 ini = QLA2XXX_INI_MODE_DUAL;
2072 else
2073 return -EINVAL;
2075 qla_set_ini_mode(vha, ini);
2076 return strlen(buf);
2079 static ssize_t
2080 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2081 char *buf)
2083 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2084 int len = 0;
2086 len += scnprintf(buf + len, PAGE_SIZE-len,
2087 "target exchange: new %d : current: %d\n\n",
2088 vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2090 len += scnprintf(buf + len, PAGE_SIZE-len,
2091 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2092 vha->host_no);
2094 return len;
2097 static ssize_t
2098 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2099 const char *buf, size_t count)
2101 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2102 int val = 0;
2104 if (sscanf(buf, "%d", &val) != 1)
2105 return -EINVAL;
2107 if (val > FW_MAX_EXCHANGES_CNT)
2108 val = FW_MAX_EXCHANGES_CNT;
2109 else if (val < 0)
2110 val = 0;
2112 vha->u_ql2xexchoffld = val;
2113 return strlen(buf);
2116 static ssize_t
2117 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2118 char *buf)
2120 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2121 int len = 0;
2123 len += scnprintf(buf + len, PAGE_SIZE-len,
2124 "target exchange: new %d : current: %d\n\n",
2125 vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2127 len += scnprintf(buf + len, PAGE_SIZE-len,
2128 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2129 vha->host_no);
2131 return len;
2134 static ssize_t
2135 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2136 const char *buf, size_t count)
2138 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2139 int val = 0;
2141 if (sscanf(buf, "%d", &val) != 1)
2142 return -EINVAL;
2144 if (val > FW_MAX_EXCHANGES_CNT)
2145 val = FW_MAX_EXCHANGES_CNT;
2146 else if (val < 0)
2147 val = 0;
2149 vha->u_ql2xiniexchg = val;
2150 return strlen(buf);
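/*
 * Report the DIF bundle pool counters: crossed pages, reads, writes,
 * kalloc and DMA allocation counts, and the number of unusable pool
 * entries.
 */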
2153 static ssize_t
2154 qla2x00_dif_bundle_statistics_show(struct device *dev,
2155 struct device_attribute *attr, char *buf)
2157 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2158 struct qla_hw_data *ha = vha->hw;
2160 return scnprintf(buf, PAGE_SIZE,
2161 "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2162 ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2163 ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2164 ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
2167 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
2168 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2169 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2170 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2171 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2172 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2173 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2174 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2175 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2176 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2177 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2178 qla2x00_zio_timer_store);
2179 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2180 qla2x00_beacon_store);
2181 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2182 qla2x00_optrom_bios_version_show, NULL);
2183 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2184 qla2x00_optrom_efi_version_show, NULL);
2185 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2186 qla2x00_optrom_fcode_version_show, NULL);
2187 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2188 NULL);
2189 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2190 qla2x00_optrom_gold_fw_version_show, NULL);
2191 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2192 NULL);
2193 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2194 NULL);
2195 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2196 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2197 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2198 NULL);
2199 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2200 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2201 qla2x00_vn_port_mac_address_show, NULL);
2202 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2203 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2204 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2205 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2206 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2207 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2208 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2209 qla2x00_allow_cna_fw_dump_show,
2210 qla2x00_allow_cna_fw_dump_store);
2211 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2212 static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
2213 static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
2214 static DEVICE_ATTR(zio_threshold, 0644,
2215 qla_zio_threshold_show,
2216 qla_zio_threshold_store);
2217 static DEVICE_ATTR_RW(qlini_mode);
2218 static DEVICE_ATTR_RW(ql2xexchoffld);
2219 static DEVICE_ATTR_RW(ql2xiniexchg);
2220 static DEVICE_ATTR(dif_bundle_statistics, 0444,
2221 qla2x00_dif_bundle_statistics_show, NULL);
2222 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2223 qla2x00_port_speed_store);
2226 struct device_attribute *qla2x00_host_attrs[] = {
2227 &dev_attr_driver_version,
2228 &dev_attr_fw_version,
2229 &dev_attr_serial_num,
2230 &dev_attr_isp_name,
2231 &dev_attr_isp_id,
2232 &dev_attr_model_name,
2233 &dev_attr_model_desc,
2234 &dev_attr_pci_info,
2235 &dev_attr_link_state,
2236 &dev_attr_zio,
2237 &dev_attr_zio_timer,
2238 &dev_attr_beacon,
2239 &dev_attr_optrom_bios_version,
2240 &dev_attr_optrom_efi_version,
2241 &dev_attr_optrom_fcode_version,
2242 &dev_attr_optrom_fw_version,
2243 &dev_attr_84xx_fw_version,
2244 &dev_attr_total_isp_aborts,
2245 &dev_attr_mpi_version,
2246 &dev_attr_phy_version,
2247 &dev_attr_flash_block_size,
2248 &dev_attr_vlan_id,
2249 &dev_attr_vn_port_mac_address,
2250 &dev_attr_fabric_param,
2251 &dev_attr_fw_state,
2252 &dev_attr_optrom_gold_fw_version,
2253 &dev_attr_thermal_temp,
2254 &dev_attr_diag_requests,
2255 &dev_attr_diag_megabytes,
2256 &dev_attr_fw_dump_size,
2257 &dev_attr_allow_cna_fw_dump,
2258 &dev_attr_pep_version,
2259 &dev_attr_min_link_speed,
2260 &dev_attr_max_speed_sup,
2261 &dev_attr_zio_threshold,
2262 &dev_attr_dif_bundle_statistics,
2263 &dev_attr_port_speed,
2264 NULL, /* reserve for qlini_mode */
2265 NULL, /* reserve for ql2xiniexchg */
2266 NULL, /* reserve for ql2xexchoffld */
2267 NULL,
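/*
 * Fill the reserved NULL slots at the end of qla2x00_host_attrs[] with the
 * target-mode attributes (qlini_mode, ql2xiniexchg, ql2xexchoffld).
 */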
2270 void qla_insert_tgt_attrs(void)
2272 struct device_attribute **attr;
2274 /* advance to empty slot */
2275 for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2276 continue;
2278 *attr = &dev_attr_qlini_mode;
2279 attr++;
2280 *attr = &dev_attr_ql2xiniexchg;
2281 attr++;
2282 *attr = &dev_attr_ql2xexchoffld;
2285 /* Host attributes. */
2287 static void
2288 qla2x00_get_host_port_id(struct Scsi_Host *shost)
2290 scsi_qla_host_t *vha = shost_priv(shost);
2292 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2293 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
2296 static void
2297 qla2x00_get_host_speed(struct Scsi_Host *shost)
2299 struct qla_hw_data *ha = ((struct scsi_qla_host *)
2300 (shost_priv(shost)))->hw;
2301 u32 speed = FC_PORTSPEED_UNKNOWN;
2303 if (IS_QLAFX00(ha)) {
2304 qlafx00_get_host_speed(shost);
2305 return;
2308 switch (ha->link_data_rate) {
2309 case PORT_SPEED_1GB:
2310 speed = FC_PORTSPEED_1GBIT;
2311 break;
2312 case PORT_SPEED_2GB:
2313 speed = FC_PORTSPEED_2GBIT;
2314 break;
2315 case PORT_SPEED_4GB:
2316 speed = FC_PORTSPEED_4GBIT;
2317 break;
2318 case PORT_SPEED_8GB:
2319 speed = FC_PORTSPEED_8GBIT;
2320 break;
2321 case PORT_SPEED_10GB:
2322 speed = FC_PORTSPEED_10GBIT;
2323 break;
2324 case PORT_SPEED_16GB:
2325 speed = FC_PORTSPEED_16GBIT;
2326 break;
2327 case PORT_SPEED_32GB:
2328 speed = FC_PORTSPEED_32GBIT;
2329 break;
2331 fc_host_speed(shost) = speed;
2334 static void
2335 qla2x00_get_host_port_type(struct Scsi_Host *shost)
2337 scsi_qla_host_t *vha = shost_priv(shost);
2338 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
2340 if (vha->vp_idx) {
2341 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2342 return;
2344 switch (vha->hw->current_topology) {
2345 case ISP_CFG_NL:
2346 port_type = FC_PORTTYPE_LPORT;
2347 break;
2348 case ISP_CFG_FL:
2349 port_type = FC_PORTTYPE_NLPORT;
2350 break;
2351 case ISP_CFG_N:
2352 port_type = FC_PORTTYPE_PTP;
2353 break;
2354 case ISP_CFG_F:
2355 port_type = FC_PORTTYPE_NPORT;
2356 break;
2358 fc_host_port_type(shost) = port_type;
2361 static void
2362 qla2x00_get_starget_node_name(struct scsi_target *starget)
2364 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2365 scsi_qla_host_t *vha = shost_priv(host);
2366 fc_port_t *fcport;
2367 u64 node_name = 0;
2369 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2370 if (fcport->rport &&
2371 starget->id == fcport->rport->scsi_target_id) {
2372 node_name = wwn_to_u64(fcport->node_name);
2373 break;
2377 fc_starget_node_name(starget) = node_name;
2380 static void
2381 qla2x00_get_starget_port_name(struct scsi_target *starget)
2383 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2384 scsi_qla_host_t *vha = shost_priv(host);
2385 fc_port_t *fcport;
2386 u64 port_name = 0;
2388 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2389 if (fcport->rport &&
2390 starget->id == fcport->rport->scsi_target_id) {
2391 port_name = wwn_to_u64(fcport->port_name);
2392 break;
2396 fc_starget_port_name(starget) = port_name;
2399 static void
2400 qla2x00_get_starget_port_id(struct scsi_target *starget)
2402 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2403 scsi_qla_host_t *vha = shost_priv(host);
2404 fc_port_t *fcport;
2405 uint32_t port_id = ~0U;
2407 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2408 if (fcport->rport &&
2409 starget->id == fcport->rport->scsi_target_id) {
2410 port_id = fcport->d_id.b.domain << 16 |
2411 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2412 break;
2416 fc_starget_port_id(starget) = port_id;
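/* Never allow a zero dev_loss_tmo; a zero request is coerced to 1. */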
2419 static void
2420 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2422 if (timeout)
2423 rport->dev_loss_tmo = timeout;
2424 else
2425 rport->dev_loss_tmo = 1;
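/*
 * dev_loss_tmo callback: the transport has given up on the rport.  Mark
 * the fcport FCS_DEVICE_DEAD, drop the rport references under host_lock,
 * and abort outstanding commands if the PCI channel has gone offline.
 */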
2428 static void
2429 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2431 struct Scsi_Host *host = rport_to_shost(rport);
2432 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2433 unsigned long flags;
2435 if (!fcport)
2436 return;
2438 /* Now that the rport has been deleted, set the fcport state to
2439 FCS_DEVICE_DEAD */
2440 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2443 * Transport has effectively 'deleted' the rport, clear
2444 * all local references.
2446 spin_lock_irqsave(host->host_lock, flags);
2447 fcport->rport = fcport->drport = NULL;
2448 *((fc_port_t **)rport->dd_data) = NULL;
2449 spin_unlock_irqrestore(host->host_lock, flags);
2451 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2452 return;
2454 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2455 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2456 return;
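/*
 * terminate_rport_io callback: unless the host is unloading or an ISP
 * abort is in progress, perform a final logout of the port so the
 * firmware releases its resources for the dead rport.
 */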
2460 static void
2461 qla2x00_terminate_rport_io(struct fc_rport *rport)
2463 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2465 if (!fcport)
2466 return;
2468 if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2469 return;
2471 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2472 return;
2474 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2475 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2476 return;
2479 * At this point all fcport's software-states are cleared. Perform any
2480 * final cleanup of firmware resources (PCBs and XCBs).
2482 if (fcport->loop_id != FC_NO_LOOP_ID) {
2483 if (IS_FWI2_CAPABLE(fcport->vha->hw))
2484 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
2485 fcport->loop_id, fcport->d_id.b.domain,
2486 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2487 else
2488 qla2x00_port_logout(fcport->vha, fcport);
2492 static int
2493 qla2x00_issue_lip(struct Scsi_Host *shost)
2495 scsi_qla_host_t *vha = shost_priv(shost);
2497 if (IS_QLAFX00(vha->hw))
2498 return 0;
2500 qla2x00_loop_reset(vha);
2501 return 0;
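/*
 * Gather link statistics into a DMA-coherent buffer (via
 * qla24xx_get_isp_stats() on FWI2-capable adapters, otherwise
 * qla2x00_get_link_status() once the loop is READY) and translate them
 * into the fc_host_statistics structure, which is preset to -1 in case
 * retrieval fails.
 */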
2504 static struct fc_host_statistics *
2505 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2507 scsi_qla_host_t *vha = shost_priv(shost);
2508 struct qla_hw_data *ha = vha->hw;
2509 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2510 int rval;
2511 struct link_statistics *stats;
2512 dma_addr_t stats_dma;
2513 struct fc_host_statistics *p = &vha->fc_host_stat;
2515 memset(p, -1, sizeof(*p));
2517 if (IS_QLAFX00(vha->hw))
2518 goto done;
2520 if (test_bit(UNLOADING, &vha->dpc_flags))
2521 goto done;
2523 if (unlikely(pci_channel_offline(ha->pdev)))
2524 goto done;
2526 if (qla2x00_chip_is_down(vha))
2527 goto done;
2529 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2530 GFP_KERNEL);
2531 if (!stats) {
2532 ql_log(ql_log_warn, vha, 0x707d,
2533 "Failed to allocate memory for stats.\n");
2534 goto done;
2537 rval = QLA_FUNCTION_FAILED;
2538 if (IS_FWI2_CAPABLE(ha)) {
2539 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2540 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2541 !ha->dpc_active) {
2542 /* Must be in a 'READY' state for statistics retrieval. */
2543 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2544 stats, stats_dma);
2547 if (rval != QLA_SUCCESS)
2548 goto done_free;
2550 p->link_failure_count = stats->link_fail_cnt;
2551 p->loss_of_sync_count = stats->loss_sync_cnt;
2552 p->loss_of_signal_count = stats->loss_sig_cnt;
2553 p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
2554 p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
2555 p->invalid_crc_count = stats->inval_crc_cnt;
2556 if (IS_FWI2_CAPABLE(ha)) {
2557 p->lip_count = stats->lip_cnt;
2558 p->tx_frames = stats->tx_frames;
2559 p->rx_frames = stats->rx_frames;
2560 p->dumped_frames = stats->discarded_frames;
2561 p->nos_count = stats->nos_rcvd;
2562 p->error_frames =
2563 stats->dropped_frames + stats->discarded_frames;
2564 p->rx_words = vha->qla_stats.input_bytes;
2565 p->tx_words = vha->qla_stats.output_bytes;
2567 p->fcp_control_requests = vha->qla_stats.control_requests;
2568 p->fcp_input_requests = vha->qla_stats.input_requests;
2569 p->fcp_output_requests = vha->qla_stats.output_requests;
2570 p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
2571 p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
2572 p->seconds_since_last_reset =
2573 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2574 do_div(p->seconds_since_last_reset, HZ);
2576 done_free:
2577 dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
2578 stats, stats_dma);
2579 done:
2580 return p;
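/*
 * Zero the driver-side counters and, on FWI2-capable adapters, ask the
 * firmware to reset its link statistics (qla24xx_get_isp_stats with
 * BIT_0).
 */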
2583 static void
2584 qla2x00_reset_host_stats(struct Scsi_Host *shost)
2586 scsi_qla_host_t *vha = shost_priv(shost);
2587 struct qla_hw_data *ha = vha->hw;
2588 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2589 struct link_statistics *stats;
2590 dma_addr_t stats_dma;
2592 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2593 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2595 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2597 if (IS_FWI2_CAPABLE(ha)) {
2598 stats = dma_alloc_coherent(&ha->pdev->dev,
2599 sizeof(*stats), &stats_dma, GFP_KERNEL);
2600 if (!stats) {
2601 ql_log(ql_log_warn, vha, 0x70d7,
2602 "Failed to allocate memory for stats.\n");
2603 return;
2606 /* reset firmware statistics */
2607 qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2609 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2610 stats, stats_dma);
2614 static void
2615 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2617 scsi_qla_host_t *vha = shost_priv(shost);
2619 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2620 sizeof(fc_host_symbolic_name(shost)));
2623 static void
2624 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2626 scsi_qla_host_t *vha = shost_priv(shost);
2628 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
2631 static void
2632 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2634 scsi_qla_host_t *vha = shost_priv(shost);
2635 uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF,
2636 0xFF, 0xFF, 0xFF, 0xFF};
2637 u64 fabric_name = wwn_to_u64(node_name);
2639 if (vha->device_flags & SWITCH_FOUND)
2640 fabric_name = wwn_to_u64(vha->fabric_node_name);
2642 fc_host_fabric_name(shost) = fabric_name;
2645 static void
2646 qla2x00_get_host_port_state(struct Scsi_Host *shost)
2648 scsi_qla_host_t *vha = shost_priv(shost);
2649 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2651 if (!base_vha->flags.online) {
2652 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2653 return;
2656 switch (atomic_read(&base_vha->loop_state)) {
2657 case LOOP_UPDATE:
2658 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2659 break;
2660 case LOOP_DOWN:
2661 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
2662 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2663 else
2664 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2665 break;
2666 case LOOP_DEAD:
2667 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2668 break;
2669 case LOOP_READY:
2670 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2671 break;
2672 default:
2673 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
2674 break;
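/*
 * NPIV vport_create callback: validate the request, create the virtual
 * host, advertise DIF/DIX capabilities when supported, register the SCSI
 * host, and optionally attach a QoS queue pair taken from the NVRAM NPIV
 * configuration.
 */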
2678 static int
2679 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
2681 int ret = 0;
2682 uint8_t qos = 0;
2683 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
2684 scsi_qla_host_t *vha = NULL;
2685 struct qla_hw_data *ha = base_vha->hw;
2686 int cnt;
2687 struct req_que *req = ha->req_q_map[0];
2688 struct qla_qpair *qpair;
2690 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2691 if (ret) {
2692 ql_log(ql_log_warn, vha, 0x707e,
2693 "Vport sanity check failed, status %x\n", ret);
2694 return (ret);
2697 vha = qla24xx_create_vhost(fc_vport);
2698 if (vha == NULL) {
2699 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2700 return FC_VPORT_FAILED;
2702 if (disable) {
2703 atomic_set(&vha->vp_state, VP_OFFLINE);
2704 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2705 } else
2706 atomic_set(&vha->vp_state, VP_FAILED);
2708 /* ready to create vport */
2709 ql_log(ql_log_info, vha, 0x7080,
2710 "VP entry id %d assigned.\n", vha->vp_idx);
2712 /* initialize vport states */
2713 atomic_set(&vha->loop_state, LOOP_DOWN);
2714 vha->vp_err_state = VP_ERR_PORTDWN;
2715 vha->vp_prev_err_state = VP_ERR_UNKWN;
2716 /* Check if physical ha port is Up */
2717 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
2718 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2719 /* Don't retry or attempt login of this virtual port */
2720 ql_dbg(ql_dbg_user, vha, 0x7081,
2721 "Vport loop state is not UP.\n");
2722 atomic_set(&vha->loop_state, LOOP_DEAD);
2723 if (!disable)
2724 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
2727 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2728 if (ha->fw_attributes & BIT_4) {
2729 int prot = 0, guard;
2730 vha->flags.difdix_supported = 1;
2731 ql_dbg(ql_dbg_user, vha, 0x7082,
2732 "Registered for DIF/DIX type 1 and 3 protection.\n");
2733 if (ql2xenabledif == 1)
2734 prot = SHOST_DIX_TYPE0_PROTECTION;
2735 scsi_host_set_prot(vha->host,
2736 prot | SHOST_DIF_TYPE1_PROTECTION
2737 | SHOST_DIF_TYPE2_PROTECTION
2738 | SHOST_DIF_TYPE3_PROTECTION
2739 | SHOST_DIX_TYPE1_PROTECTION
2740 | SHOST_DIX_TYPE2_PROTECTION
2741 | SHOST_DIX_TYPE3_PROTECTION);
2743 guard = SHOST_DIX_GUARD_CRC;
2745 if (IS_PI_IPGUARD_CAPABLE(ha) &&
2746 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2747 guard |= SHOST_DIX_GUARD_IP;
2749 scsi_host_set_guard(vha->host, guard);
2750 } else
2751 vha->flags.difdix_supported = 0;
2754 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
2755 &ha->pdev->dev)) {
2756 ql_dbg(ql_dbg_user, vha, 0x7083,
2757 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
2758 goto vport_create_failed_2;
2761 /* initialize attributes */
2762 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2763 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2764 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2765 fc_host_supported_classes(vha->host) =
2766 fc_host_supported_classes(base_vha->host);
2767 fc_host_supported_speeds(vha->host) =
2768 fc_host_supported_speeds(base_vha->host);
2770 qlt_vport_create(vha, ha);
2771 qla24xx_vport_disable(fc_vport, disable);
2773 if (!ql2xmqsupport || !ha->npiv_info)
2774 goto vport_queue;
2776 /* Create a request queue in QoS mode for the vport */
2777 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
2778 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
2779 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
2780 8) == 0) {
2781 qos = ha->npiv_info[cnt].q_qos;
2782 break;
2786 if (qos) {
2787 qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
2788 if (!qpair)
2789 ql_log(ql_log_warn, vha, 0x7084,
2790 "Can't create qpair for VP[%d]\n",
2791 vha->vp_idx);
2792 else {
2793 ql_dbg(ql_dbg_multiq, vha, 0xc001,
2794 "Queue pair: %d Qos: %d) created for VP[%d]\n",
2795 qpair->id, qos, vha->vp_idx);
2796 ql_dbg(ql_dbg_user, vha, 0x7085,
2797 "Queue Pair: %d Qos: %d) created for VP[%d]\n",
2798 qpair->id, qos, vha->vp_idx);
2799 req = qpair->req;
2800 vha->qpair = qpair;
2804 vport_queue:
2805 vha->req = req;
2806 return 0;
2808 vport_create_failed_2:
2809 qla24xx_disable_vp(vha);
2810 qla24xx_deallocate_vp_id(vha);
2811 scsi_host_put(vha->host);
2812 return FC_VPORT_FAILED;
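/*
 * NPIV vport_delete callback: wait for loop resync/fcport updates to
 * settle, disable the vport, remove its SCSI/FC host, and release the
 * fcports, GNL buffer, scan list and queue pair before dropping the last
 * host reference.
 */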
2815 static int
2816 qla24xx_vport_delete(struct fc_vport *fc_vport)
2818 scsi_qla_host_t *vha = fc_vport->dd_data;
2819 struct qla_hw_data *ha = vha->hw;
2820 uint16_t id = vha->vp_idx;
2822 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
2823 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
2824 msleep(1000);
2826 qla_nvme_delete(vha);
2828 qla24xx_disable_vp(vha);
2829 qla2x00_wait_for_sess_deletion(vha);
2831 vha->flags.delete_progress = 1;
2833 qlt_remove_target(ha, vha);
2835 fc_remove_host(vha->host);
2837 scsi_remove_host(vha->host);
2839 /* Allow the timer to run to drain queued items when removing the vp */
2840 qla24xx_deallocate_vp_id(vha);
2842 if (vha->timer_active) {
2843 qla2x00_vp_stop_timer(vha);
2844 ql_dbg(ql_dbg_user, vha, 0x7086,
2845 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
2848 qla2x00_free_fcports(vha);
2850 mutex_lock(&ha->vport_lock);
2851 ha->cur_vport_count--;
2852 clear_bit(vha->vp_idx, ha->vp_idx_map);
2853 mutex_unlock(&ha->vport_lock);
2855 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
2856 vha->gnl.ldma);
2858 vfree(vha->scan.l);
2860 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
2861 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
2862 ql_log(ql_log_warn, vha, 0x7087,
2863 "Queue Pair delete failed.\n");
2866 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
2867 scsi_host_put(vha->host);
2868 return 0;
2871 static int
2872 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
2874 scsi_qla_host_t *vha = fc_vport->dd_data;
2876 if (disable)
2877 qla24xx_disable_vp(vha);
2878 else
2879 qla24xx_enable_vp(vha);
2881 return 0;
2884 struct fc_function_template qla2xxx_transport_functions = {
2886 .show_host_node_name = 1,
2887 .show_host_port_name = 1,
2888 .show_host_supported_classes = 1,
2889 .show_host_supported_speeds = 1,
2891 .get_host_port_id = qla2x00_get_host_port_id,
2892 .show_host_port_id = 1,
2893 .get_host_speed = qla2x00_get_host_speed,
2894 .show_host_speed = 1,
2895 .get_host_port_type = qla2x00_get_host_port_type,
2896 .show_host_port_type = 1,
2897 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2898 .show_host_symbolic_name = 1,
2899 .set_host_system_hostname = qla2x00_set_host_system_hostname,
2900 .show_host_system_hostname = 1,
2901 .get_host_fabric_name = qla2x00_get_host_fabric_name,
2902 .show_host_fabric_name = 1,
2903 .get_host_port_state = qla2x00_get_host_port_state,
2904 .show_host_port_state = 1,
2906 .dd_fcrport_size = sizeof(struct fc_port *),
2907 .show_rport_supported_classes = 1,
2909 .get_starget_node_name = qla2x00_get_starget_node_name,
2910 .show_starget_node_name = 1,
2911 .get_starget_port_name = qla2x00_get_starget_port_name,
2912 .show_starget_port_name = 1,
2913 .get_starget_port_id = qla2x00_get_starget_port_id,
2914 .show_starget_port_id = 1,
2916 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2917 .show_rport_dev_loss_tmo = 1,
2919 .issue_fc_host_lip = qla2x00_issue_lip,
2920 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2921 .terminate_rport_io = qla2x00_terminate_rport_io,
2922 .get_fc_host_stats = qla2x00_get_fc_host_stats,
2923 .reset_fc_host_stats = qla2x00_reset_host_stats,
2925 .vport_create = qla24xx_vport_create,
2926 .vport_disable = qla24xx_vport_disable,
2927 .vport_delete = qla24xx_vport_delete,
2928 .bsg_request = qla24xx_bsg_request,
2929 .bsg_timeout = qla24xx_bsg_timeout,
2932 struct fc_function_template qla2xxx_transport_vport_functions = {
2934 .show_host_node_name = 1,
2935 .show_host_port_name = 1,
2936 .show_host_supported_classes = 1,
2938 .get_host_port_id = qla2x00_get_host_port_id,
2939 .show_host_port_id = 1,
2940 .get_host_speed = qla2x00_get_host_speed,
2941 .show_host_speed = 1,
2942 .get_host_port_type = qla2x00_get_host_port_type,
2943 .show_host_port_type = 1,
2944 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2945 .show_host_symbolic_name = 1,
2946 .set_host_system_hostname = qla2x00_set_host_system_hostname,
2947 .show_host_system_hostname = 1,
2948 .get_host_fabric_name = qla2x00_get_host_fabric_name,
2949 .show_host_fabric_name = 1,
2950 .get_host_port_state = qla2x00_get_host_port_state,
2951 .show_host_port_state = 1,
2953 .dd_fcrport_size = sizeof(struct fc_port *),
2954 .show_rport_supported_classes = 1,
2956 .get_starget_node_name = qla2x00_get_starget_node_name,
2957 .show_starget_node_name = 1,
2958 .get_starget_port_name = qla2x00_get_starget_port_name,
2959 .show_starget_port_name = 1,
2960 .get_starget_port_id = qla2x00_get_starget_port_id,
2961 .show_starget_port_id = 1,
2963 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2964 .show_rport_dev_loss_tmo = 1,
2966 .issue_fc_host_lip = qla2x00_issue_lip,
2967 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2968 .terminate_rport_io = qla2x00_terminate_rport_io,
2969 .get_fc_host_stats = qla2x00_get_fc_host_stats,
2970 .reset_fc_host_stats = qla2x00_reset_host_stats,
2972 .bsg_request = qla24xx_bsg_request,
2973 .bsg_timeout = qla24xx_bsg_timeout,
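/*
 * Seed the fc_host attributes at host initialization time: dev_loss_tmo,
 * node/port names, supported classes, NPIV limits and the supported-speed
 * mask derived from the ISP type.
 */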
2976 void
2977 qla2x00_init_host_attr(scsi_qla_host_t *vha)
2979 struct qla_hw_data *ha = vha->hw;
2980 u32 speed = FC_PORTSPEED_UNKNOWN;
2982 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2983 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2984 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2985 fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
2986 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
2987 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2988 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2990 if (IS_CNA_CAPABLE(ha))
2991 speed = FC_PORTSPEED_10GBIT;
2992 else if (IS_QLA2031(ha))
2993 speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
2994 FC_PORTSPEED_4GBIT;
2995 else if (IS_QLA25XX(ha))
2996 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2997 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2998 else if (IS_QLA24XX_TYPE(ha))
2999 speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
3000 FC_PORTSPEED_1GBIT;
3001 else if (IS_QLA23XX(ha))
3002 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
3003 else if (IS_QLAFX00(ha))
3004 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
3005 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
3006 else if (IS_QLA27XX(ha))
3007 speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
3008 FC_PORTSPEED_8GBIT;
3009 else
3010 speed = FC_PORTSPEED_1GBIT;
3011 fc_host_supported_speeds(vha->host) = speed;