// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011-2013 QLogic Corporation
 */
12 qla4xxx_read_flash(struct bsg_job
*bsg_job
)
14 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
15 struct scsi_qla_host
*ha
= to_qla_host(host
);
16 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
17 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
21 uint8_t *flash
= NULL
;
24 bsg_reply
->reply_payload_rcv_len
= 0;
26 if (unlikely(pci_channel_offline(ha
->pdev
)))
29 if (ql4xxx_reset_active(ha
)) {
30 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
35 if (ha
->flash_state
!= QLFLASH_WAITING
) {
36 ql4_printk(KERN_ERR
, ha
, "%s: another flash operation "
37 "active\n", __func__
);
42 ha
->flash_state
= QLFLASH_READING
;
43 offset
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
44 length
= bsg_job
->reply_payload
.payload_len
;
46 flash
= dma_alloc_coherent(&ha
->pdev
->dev
, length
, &flash_dma
,
49 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for flash "
55 rval
= qla4xxx_get_flash(ha
, flash_dma
, offset
, length
);
57 ql4_printk(KERN_ERR
, ha
, "%s: get flash failed\n", __func__
);
58 bsg_reply
->result
= DID_ERROR
<< 16;
61 bsg_reply
->reply_payload_rcv_len
=
62 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
63 bsg_job
->reply_payload
.sg_cnt
,
65 bsg_reply
->result
= DID_OK
<< 16;
68 bsg_job_done(bsg_job
, bsg_reply
->result
,
69 bsg_reply
->reply_payload_rcv_len
);
70 dma_free_coherent(&ha
->pdev
->dev
, length
, flash
, flash_dma
);
72 ha
->flash_state
= QLFLASH_WAITING
;
77 qla4xxx_update_flash(struct bsg_job
*bsg_job
)
79 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
80 struct scsi_qla_host
*ha
= to_qla_host(host
);
81 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
82 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
87 uint8_t *flash
= NULL
;
90 bsg_reply
->reply_payload_rcv_len
= 0;
92 if (unlikely(pci_channel_offline(ha
->pdev
)))
95 if (ql4xxx_reset_active(ha
)) {
96 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
101 if (ha
->flash_state
!= QLFLASH_WAITING
) {
102 ql4_printk(KERN_ERR
, ha
, "%s: another flash operation "
103 "active\n", __func__
);
108 ha
->flash_state
= QLFLASH_WRITING
;
109 length
= bsg_job
->request_payload
.payload_len
;
110 offset
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
111 options
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2];
113 flash
= dma_alloc_coherent(&ha
->pdev
->dev
, length
, &flash_dma
,
116 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for flash "
122 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
123 bsg_job
->request_payload
.sg_cnt
, flash
, length
);
125 rval
= qla4xxx_set_flash(ha
, flash_dma
, offset
, length
, options
);
127 ql4_printk(KERN_ERR
, ha
, "%s: set flash failed\n", __func__
);
128 bsg_reply
->result
= DID_ERROR
<< 16;
131 bsg_reply
->result
= DID_OK
<< 16;
133 bsg_job_done(bsg_job
, bsg_reply
->result
,
134 bsg_reply
->reply_payload_rcv_len
);
135 dma_free_coherent(&ha
->pdev
->dev
, length
, flash
, flash_dma
);
137 ha
->flash_state
= QLFLASH_WAITING
;
142 qla4xxx_get_acb_state(struct bsg_job
*bsg_job
)
144 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
145 struct scsi_qla_host
*ha
= to_qla_host(host
);
146 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
147 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
148 uint32_t status
[MBOX_REG_COUNT
];
153 bsg_reply
->reply_payload_rcv_len
= 0;
155 if (unlikely(pci_channel_offline(ha
->pdev
)))
158 /* Only 4022 and above adapters are supported */
162 if (ql4xxx_reset_active(ha
)) {
163 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
168 if (bsg_job
->reply_payload
.payload_len
< sizeof(status
)) {
169 ql4_printk(KERN_ERR
, ha
, "%s: invalid payload len %d\n",
170 __func__
, bsg_job
->reply_payload
.payload_len
);
175 acb_idx
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
176 ip_idx
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2];
178 rval
= qla4xxx_get_ip_state(ha
, acb_idx
, ip_idx
, status
);
180 ql4_printk(KERN_ERR
, ha
, "%s: get ip state failed\n",
182 bsg_reply
->result
= DID_ERROR
<< 16;
185 bsg_reply
->reply_payload_rcv_len
=
186 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
187 bsg_job
->reply_payload
.sg_cnt
,
188 status
, sizeof(status
));
189 bsg_reply
->result
= DID_OK
<< 16;
192 bsg_job_done(bsg_job
, bsg_reply
->result
,
193 bsg_reply
->reply_payload_rcv_len
);
199 qla4xxx_read_nvram(struct bsg_job
*bsg_job
)
201 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
202 struct scsi_qla_host
*ha
= to_qla_host(host
);
203 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
204 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
207 uint32_t total_len
= 0;
208 dma_addr_t nvram_dma
;
209 uint8_t *nvram
= NULL
;
212 bsg_reply
->reply_payload_rcv_len
= 0;
214 if (unlikely(pci_channel_offline(ha
->pdev
)))
217 /* Only 40xx adapters are supported */
218 if (!(is_qla4010(ha
) || is_qla4022(ha
) || is_qla4032(ha
)))
221 if (ql4xxx_reset_active(ha
)) {
222 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
227 offset
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
228 len
= bsg_job
->reply_payload
.payload_len
;
229 total_len
= offset
+ len
;
231 /* total len should not be greater than max NVRAM size */
232 if ((is_qla4010(ha
) && total_len
> QL4010_NVRAM_SIZE
) ||
233 ((is_qla4022(ha
) || is_qla4032(ha
)) &&
234 total_len
> QL40X2_NVRAM_SIZE
)) {
235 ql4_printk(KERN_ERR
, ha
, "%s: offset+len greater than max"
236 " nvram size, offset=%d len=%d\n",
237 __func__
, offset
, len
);
241 nvram
= dma_alloc_coherent(&ha
->pdev
->dev
, len
, &nvram_dma
,
244 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for nvram "
250 rval
= qla4xxx_get_nvram(ha
, nvram_dma
, offset
, len
);
252 ql4_printk(KERN_ERR
, ha
, "%s: get nvram failed\n", __func__
);
253 bsg_reply
->result
= DID_ERROR
<< 16;
256 bsg_reply
->reply_payload_rcv_len
=
257 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
258 bsg_job
->reply_payload
.sg_cnt
,
260 bsg_reply
->result
= DID_OK
<< 16;
263 bsg_job_done(bsg_job
, bsg_reply
->result
,
264 bsg_reply
->reply_payload_rcv_len
);
265 dma_free_coherent(&ha
->pdev
->dev
, len
, nvram
, nvram_dma
);
271 qla4xxx_update_nvram(struct bsg_job
*bsg_job
)
273 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
274 struct scsi_qla_host
*ha
= to_qla_host(host
);
275 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
276 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
279 uint32_t total_len
= 0;
280 dma_addr_t nvram_dma
;
281 uint8_t *nvram
= NULL
;
284 bsg_reply
->reply_payload_rcv_len
= 0;
286 if (unlikely(pci_channel_offline(ha
->pdev
)))
289 if (!(is_qla4010(ha
) || is_qla4022(ha
) || is_qla4032(ha
)))
292 if (ql4xxx_reset_active(ha
)) {
293 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
298 offset
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
299 len
= bsg_job
->request_payload
.payload_len
;
300 total_len
= offset
+ len
;
302 /* total len should not be greater than max NVRAM size */
303 if ((is_qla4010(ha
) && total_len
> QL4010_NVRAM_SIZE
) ||
304 ((is_qla4022(ha
) || is_qla4032(ha
)) &&
305 total_len
> QL40X2_NVRAM_SIZE
)) {
306 ql4_printk(KERN_ERR
, ha
, "%s: offset+len greater than max"
307 " nvram size, offset=%d len=%d\n",
308 __func__
, offset
, len
);
312 nvram
= dma_alloc_coherent(&ha
->pdev
->dev
, len
, &nvram_dma
,
315 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for flash "
321 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
322 bsg_job
->request_payload
.sg_cnt
, nvram
, len
);
324 rval
= qla4xxx_set_nvram(ha
, nvram_dma
, offset
, len
);
326 ql4_printk(KERN_ERR
, ha
, "%s: set nvram failed\n", __func__
);
327 bsg_reply
->result
= DID_ERROR
<< 16;
330 bsg_reply
->result
= DID_OK
<< 16;
332 bsg_job_done(bsg_job
, bsg_reply
->result
,
333 bsg_reply
->reply_payload_rcv_len
);
334 dma_free_coherent(&ha
->pdev
->dev
, len
, nvram
, nvram_dma
);
340 qla4xxx_restore_defaults(struct bsg_job
*bsg_job
)
342 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
343 struct scsi_qla_host
*ha
= to_qla_host(host
);
344 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
345 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
351 bsg_reply
->reply_payload_rcv_len
= 0;
353 if (unlikely(pci_channel_offline(ha
->pdev
)))
359 if (ql4xxx_reset_active(ha
)) {
360 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
365 region
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
366 field0
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2];
367 field1
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[3];
369 rval
= qla4xxx_restore_factory_defaults(ha
, region
, field0
, field1
);
371 ql4_printk(KERN_ERR
, ha
, "%s: set nvram failed\n", __func__
);
372 bsg_reply
->result
= DID_ERROR
<< 16;
375 bsg_reply
->result
= DID_OK
<< 16;
377 bsg_job_done(bsg_job
, bsg_reply
->result
,
378 bsg_reply
->reply_payload_rcv_len
);
384 qla4xxx_bsg_get_acb(struct bsg_job
*bsg_job
)
386 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
387 struct scsi_qla_host
*ha
= to_qla_host(host
);
388 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
389 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
390 uint32_t acb_type
= 0;
396 bsg_reply
->reply_payload_rcv_len
= 0;
398 if (unlikely(pci_channel_offline(ha
->pdev
)))
401 /* Only 4022 and above adapters are supported */
405 if (ql4xxx_reset_active(ha
)) {
406 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
411 acb_type
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
412 len
= bsg_job
->reply_payload
.payload_len
;
413 if (len
< sizeof(struct addr_ctrl_blk
)) {
414 ql4_printk(KERN_ERR
, ha
, "%s: invalid acb len %d\n",
420 acb
= dma_alloc_coherent(&ha
->pdev
->dev
, len
, &acb_dma
, GFP_KERNEL
);
422 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for acb "
428 rval
= qla4xxx_get_acb(ha
, acb_dma
, acb_type
, len
);
430 ql4_printk(KERN_ERR
, ha
, "%s: get acb failed\n", __func__
);
431 bsg_reply
->result
= DID_ERROR
<< 16;
434 bsg_reply
->reply_payload_rcv_len
=
435 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
436 bsg_job
->reply_payload
.sg_cnt
,
438 bsg_reply
->result
= DID_OK
<< 16;
441 bsg_job_done(bsg_job
, bsg_reply
->result
,
442 bsg_reply
->reply_payload_rcv_len
);
443 dma_free_coherent(&ha
->pdev
->dev
, len
, acb
, acb_dma
);
448 static void ql4xxx_execute_diag_cmd(struct bsg_job
*bsg_job
)
450 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
451 struct scsi_qla_host
*ha
= to_qla_host(host
);
452 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
453 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
454 uint8_t *rsp_ptr
= NULL
;
455 uint32_t mbox_cmd
[MBOX_REG_COUNT
];
456 uint32_t mbox_sts
[MBOX_REG_COUNT
];
457 int status
= QLA_ERROR
;
459 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
461 if (test_bit(DPC_RESET_HA
, &ha
->dpc_flags
)) {
462 ql4_printk(KERN_INFO
, ha
, "%s: Adapter reset in progress. Invalid Request\n",
464 bsg_reply
->result
= DID_ERROR
<< 16;
465 goto exit_diag_mem_test
;
468 bsg_reply
->reply_payload_rcv_len
= 0;
469 memcpy(mbox_cmd
, &bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1],
470 sizeof(uint32_t) * MBOX_REG_COUNT
);
472 DEBUG2(ql4_printk(KERN_INFO
, ha
,
473 "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
474 __func__
, mbox_cmd
[0], mbox_cmd
[1], mbox_cmd
[2],
475 mbox_cmd
[3], mbox_cmd
[4], mbox_cmd
[5], mbox_cmd
[6],
478 status
= qla4xxx_mailbox_command(ha
, MBOX_REG_COUNT
, 8, &mbox_cmd
[0],
481 DEBUG2(ql4_printk(KERN_INFO
, ha
,
482 "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
483 __func__
, mbox_sts
[0], mbox_sts
[1], mbox_sts
[2],
484 mbox_sts
[3], mbox_sts
[4], mbox_sts
[5], mbox_sts
[6],
487 if (status
== QLA_SUCCESS
)
488 bsg_reply
->result
= DID_OK
<< 16;
490 bsg_reply
->result
= DID_ERROR
<< 16;
492 /* Send mbox_sts to application */
493 bsg_job
->reply_len
= sizeof(struct iscsi_bsg_reply
) + sizeof(mbox_sts
);
494 rsp_ptr
= ((uint8_t *)bsg_reply
) + sizeof(struct iscsi_bsg_reply
);
495 memcpy(rsp_ptr
, mbox_sts
, sizeof(mbox_sts
));
498 DEBUG2(ql4_printk(KERN_INFO
, ha
,
499 "%s: bsg_reply->result = x%x, status = %s\n",
500 __func__
, bsg_reply
->result
, STATUS(status
)));
502 bsg_job_done(bsg_job
, bsg_reply
->result
,
503 bsg_reply
->reply_payload_rcv_len
);
506 static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host
*ha
,
509 int status
= QLA_SUCCESS
;
511 if (!wait_for_completion_timeout(&ha
->idc_comp
, (IDC_COMP_TOV
* HZ
))) {
512 ql4_printk(KERN_INFO
, ha
, "%s: IDC Complete notification not received, Waiting for another %d timeout",
513 __func__
, ha
->idc_extend_tmo
);
514 if (ha
->idc_extend_tmo
) {
515 if (!wait_for_completion_timeout(&ha
->idc_comp
,
516 (ha
->idc_extend_tmo
* HZ
))) {
517 ha
->notify_idc_comp
= 0;
518 ha
->notify_link_up_comp
= 0;
519 ql4_printk(KERN_WARNING
, ha
, "%s: Aborting: IDC Complete notification not received",
524 DEBUG2(ql4_printk(KERN_INFO
, ha
,
525 "%s: IDC Complete notification received\n",
530 DEBUG2(ql4_printk(KERN_INFO
, ha
,
531 "%s: IDC Complete notification received\n",
534 ha
->notify_idc_comp
= 0;
537 if (!wait_for_completion_timeout(&ha
->link_up_comp
,
538 (IDC_COMP_TOV
* HZ
))) {
539 ha
->notify_link_up_comp
= 0;
540 ql4_printk(KERN_WARNING
, ha
, "%s: Aborting: LINK UP notification not received",
545 DEBUG2(ql4_printk(KERN_INFO
, ha
,
546 "%s: LINK UP notification received\n",
549 ha
->notify_link_up_comp
= 0;
556 static int qla4_83xx_pre_loopback_config(struct scsi_qla_host
*ha
,
560 int status
= QLA_SUCCESS
;
562 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
564 status
= qla4_83xx_get_port_config(ha
, &config
);
565 if (status
!= QLA_SUCCESS
)
566 goto exit_pre_loopback_config
;
568 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: Default port config=%08X\n",
571 if ((config
& ENABLE_INTERNAL_LOOPBACK
) ||
572 (config
& ENABLE_EXTERNAL_LOOPBACK
)) {
573 ql4_printk(KERN_INFO
, ha
, "%s: Loopback diagnostics already in progress. Invalid request\n",
575 goto exit_pre_loopback_config
;
578 if (mbox_cmd
[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK
)
579 config
|= ENABLE_INTERNAL_LOOPBACK
;
581 if (mbox_cmd
[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK
)
582 config
|= ENABLE_EXTERNAL_LOOPBACK
;
584 config
&= ~ENABLE_DCBX
;
586 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: New port config=%08X\n",
589 ha
->notify_idc_comp
= 1;
590 ha
->notify_link_up_comp
= 1;
592 /* get the link state */
593 qla4xxx_get_firmware_state(ha
);
595 status
= qla4_83xx_set_port_config(ha
, &config
);
596 if (status
!= QLA_SUCCESS
) {
597 ha
->notify_idc_comp
= 0;
598 ha
->notify_link_up_comp
= 0;
599 goto exit_pre_loopback_config
;
601 exit_pre_loopback_config
:
602 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: status = %s\n", __func__
,
607 static int qla4_83xx_post_loopback_config(struct scsi_qla_host
*ha
,
610 int status
= QLA_SUCCESS
;
613 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
615 status
= qla4_83xx_get_port_config(ha
, &config
);
616 if (status
!= QLA_SUCCESS
)
617 goto exit_post_loopback_config
;
619 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: port config=%08X\n", __func__
,
622 if (mbox_cmd
[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK
)
623 config
&= ~ENABLE_INTERNAL_LOOPBACK
;
624 else if (mbox_cmd
[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK
)
625 config
&= ~ENABLE_EXTERNAL_LOOPBACK
;
627 config
|= ENABLE_DCBX
;
629 DEBUG2(ql4_printk(KERN_INFO
, ha
,
630 "%s: Restore default port config=%08X\n", __func__
,
633 ha
->notify_idc_comp
= 1;
634 if (ha
->addl_fw_state
& FW_ADDSTATE_LINK_UP
)
635 ha
->notify_link_up_comp
= 1;
637 status
= qla4_83xx_set_port_config(ha
, &config
);
638 if (status
!= QLA_SUCCESS
) {
639 ql4_printk(KERN_INFO
, ha
, "%s: Scheduling adapter reset\n",
641 set_bit(DPC_RESET_HA
, &ha
->dpc_flags
);
642 clear_bit(AF_LOOPBACK
, &ha
->flags
);
643 goto exit_post_loopback_config
;
646 exit_post_loopback_config
:
647 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: status = %s\n", __func__
,
652 static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job
*bsg_job
)
654 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
655 struct scsi_qla_host
*ha
= to_qla_host(host
);
656 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
657 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
658 uint8_t *rsp_ptr
= NULL
;
659 uint32_t mbox_cmd
[MBOX_REG_COUNT
];
660 uint32_t mbox_sts
[MBOX_REG_COUNT
];
661 int wait_for_link
= 1;
662 int status
= QLA_ERROR
;
664 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
666 bsg_reply
->reply_payload_rcv_len
= 0;
668 if (test_bit(AF_LOOPBACK
, &ha
->flags
)) {
669 ql4_printk(KERN_INFO
, ha
, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
671 bsg_reply
->result
= DID_ERROR
<< 16;
672 goto exit_loopback_cmd
;
675 if (test_bit(DPC_RESET_HA
, &ha
->dpc_flags
)) {
676 ql4_printk(KERN_INFO
, ha
, "%s: Adapter reset in progress. Invalid Request\n",
678 bsg_reply
->result
= DID_ERROR
<< 16;
679 goto exit_loopback_cmd
;
682 memcpy(mbox_cmd
, &bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1],
683 sizeof(uint32_t) * MBOX_REG_COUNT
);
685 if (is_qla8032(ha
) || is_qla8042(ha
)) {
686 status
= qla4_83xx_pre_loopback_config(ha
, mbox_cmd
);
687 if (status
!= QLA_SUCCESS
) {
688 bsg_reply
->result
= DID_ERROR
<< 16;
689 goto exit_loopback_cmd
;
692 status
= qla4_83xx_wait_for_loopback_config_comp(ha
,
694 if (status
!= QLA_SUCCESS
) {
695 bsg_reply
->result
= DID_TIME_OUT
<< 16;
700 DEBUG2(ql4_printk(KERN_INFO
, ha
,
701 "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
702 __func__
, mbox_cmd
[0], mbox_cmd
[1], mbox_cmd
[2],
703 mbox_cmd
[3], mbox_cmd
[4], mbox_cmd
[5], mbox_cmd
[6],
706 status
= qla4xxx_mailbox_command(ha
, MBOX_REG_COUNT
, 8, &mbox_cmd
[0],
709 if (status
== QLA_SUCCESS
)
710 bsg_reply
->result
= DID_OK
<< 16;
712 bsg_reply
->result
= DID_ERROR
<< 16;
714 DEBUG2(ql4_printk(KERN_INFO
, ha
,
715 "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
716 __func__
, mbox_sts
[0], mbox_sts
[1], mbox_sts
[2],
717 mbox_sts
[3], mbox_sts
[4], mbox_sts
[5], mbox_sts
[6],
720 /* Send mbox_sts to application */
721 bsg_job
->reply_len
= sizeof(struct iscsi_bsg_reply
) + sizeof(mbox_sts
);
722 rsp_ptr
= ((uint8_t *)bsg_reply
) + sizeof(struct iscsi_bsg_reply
);
723 memcpy(rsp_ptr
, mbox_sts
, sizeof(mbox_sts
));
725 if (is_qla8032(ha
) || is_qla8042(ha
)) {
726 status
= qla4_83xx_post_loopback_config(ha
, mbox_cmd
);
727 if (status
!= QLA_SUCCESS
) {
728 bsg_reply
->result
= DID_ERROR
<< 16;
729 goto exit_loopback_cmd
;
732 /* for pre_loopback_config() wait for LINK UP only
733 * if PHY LINK is UP */
734 if (!(ha
->addl_fw_state
& FW_ADDSTATE_LINK_UP
))
737 status
= qla4_83xx_wait_for_loopback_config_comp(ha
,
739 if (status
!= QLA_SUCCESS
) {
740 bsg_reply
->result
= DID_TIME_OUT
<< 16;
741 goto exit_loopback_cmd
;
745 DEBUG2(ql4_printk(KERN_INFO
, ha
,
746 "%s: bsg_reply->result = x%x, status = %s\n",
747 __func__
, bsg_reply
->result
, STATUS(status
)));
748 bsg_job_done(bsg_job
, bsg_reply
->result
,
749 bsg_reply
->reply_payload_rcv_len
);
752 static int qla4xxx_execute_diag_test(struct bsg_job
*bsg_job
)
754 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
755 struct scsi_qla_host
*ha
= to_qla_host(host
);
756 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
760 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
762 diag_cmd
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
763 if (diag_cmd
== MBOX_CMD_DIAG_TEST
) {
764 switch (bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2]) {
765 case QL_DIAG_CMD_TEST_DDR_SIZE
:
766 case QL_DIAG_CMD_TEST_DDR_RW
:
767 case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW
:
768 case QL_DIAG_CMD_TEST_NVRAM
:
769 case QL_DIAG_CMD_TEST_FLASH_ROM
:
770 case QL_DIAG_CMD_TEST_DMA_XFER
:
771 case QL_DIAG_CMD_SELF_DDR_RW
:
772 case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW
:
773 /* Execute diag test for adapter RAM/FLASH */
774 ql4xxx_execute_diag_cmd(bsg_job
);
775 /* Always return success as we want to sent bsg_reply
780 case QL_DIAG_CMD_TEST_INT_LOOPBACK
:
781 case QL_DIAG_CMD_TEST_EXT_LOOPBACK
:
782 /* Execute diag test for Network */
783 qla4xxx_execute_diag_loopback_cmd(bsg_job
);
784 /* Always return success as we want to sent bsg_reply
789 ql4_printk(KERN_ERR
, ha
, "%s: Invalid diag test: 0x%x\n",
791 bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2]);
793 } else if ((diag_cmd
== MBOX_CMD_SET_LED_CONFIG
) ||
794 (diag_cmd
== MBOX_CMD_GET_LED_CONFIG
)) {
795 ql4xxx_execute_diag_cmd(bsg_job
);
798 ql4_printk(KERN_ERR
, ha
, "%s: Invalid diag cmd: 0x%x\n",
806 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
807 * @bsg_job: iscsi_bsg_job to handle
809 int qla4xxx_process_vendor_specific(struct bsg_job
*bsg_job
)
811 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
812 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
813 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
814 struct scsi_qla_host
*ha
= to_qla_host(host
);
816 switch (bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[0]) {
817 case QLISCSI_VND_READ_FLASH
:
818 return qla4xxx_read_flash(bsg_job
);
820 case QLISCSI_VND_UPDATE_FLASH
:
821 return qla4xxx_update_flash(bsg_job
);
823 case QLISCSI_VND_GET_ACB_STATE
:
824 return qla4xxx_get_acb_state(bsg_job
);
826 case QLISCSI_VND_READ_NVRAM
:
827 return qla4xxx_read_nvram(bsg_job
);
829 case QLISCSI_VND_UPDATE_NVRAM
:
830 return qla4xxx_update_nvram(bsg_job
);
832 case QLISCSI_VND_RESTORE_DEFAULTS
:
833 return qla4xxx_restore_defaults(bsg_job
);
835 case QLISCSI_VND_GET_ACB
:
836 return qla4xxx_bsg_get_acb(bsg_job
);
838 case QLISCSI_VND_DIAG_TEST
:
839 return qla4xxx_execute_diag_test(bsg_job
);
842 ql4_printk(KERN_ERR
, ha
, "%s: invalid BSG vendor command: "
843 "0x%x\n", __func__
, bsg_req
->msgcode
);
844 bsg_reply
->result
= (DID_ERROR
<< 16);
845 bsg_reply
->reply_payload_rcv_len
= 0;
846 bsg_job_done(bsg_job
, bsg_reply
->result
,
847 bsg_reply
->reply_payload_rcv_len
);
853 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
854 * @bsg_job: iscsi_bsg_job to handle
856 int qla4xxx_bsg_request(struct bsg_job
*bsg_job
)
858 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
859 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
860 struct scsi_qla_host
*ha
= to_qla_host(host
);
862 switch (bsg_req
->msgcode
) {
863 case ISCSI_BSG_HST_VENDOR
:
864 return qla4xxx_process_vendor_specific(bsg_job
);
867 ql4_printk(KERN_ERR
, ha
, "%s: invalid BSG command: 0x%x\n",
868 __func__
, bsg_req
->msgcode
);