2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2011-2013 QLogic Corporation
5 * See LICENSE.qla4xxx for copyright and licensing details.
13 qla4xxx_read_flash(struct bsg_job
*bsg_job
)
15 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
16 struct scsi_qla_host
*ha
= to_qla_host(host
);
17 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
18 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
22 uint8_t *flash
= NULL
;
25 bsg_reply
->reply_payload_rcv_len
= 0;
27 if (unlikely(pci_channel_offline(ha
->pdev
)))
30 if (ql4xxx_reset_active(ha
)) {
31 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
36 if (ha
->flash_state
!= QLFLASH_WAITING
) {
37 ql4_printk(KERN_ERR
, ha
, "%s: another flash operation "
38 "active\n", __func__
);
43 ha
->flash_state
= QLFLASH_READING
;
44 offset
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
45 length
= bsg_job
->reply_payload
.payload_len
;
47 flash
= dma_alloc_coherent(&ha
->pdev
->dev
, length
, &flash_dma
,
50 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for flash "
56 rval
= qla4xxx_get_flash(ha
, flash_dma
, offset
, length
);
58 ql4_printk(KERN_ERR
, ha
, "%s: get flash failed\n", __func__
);
59 bsg_reply
->result
= DID_ERROR
<< 16;
62 bsg_reply
->reply_payload_rcv_len
=
63 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
64 bsg_job
->reply_payload
.sg_cnt
,
66 bsg_reply
->result
= DID_OK
<< 16;
69 bsg_job_done(bsg_job
, bsg_reply
->result
,
70 bsg_reply
->reply_payload_rcv_len
);
71 dma_free_coherent(&ha
->pdev
->dev
, length
, flash
, flash_dma
);
73 ha
->flash_state
= QLFLASH_WAITING
;
/*
 * qla4xxx_update_flash - BSG vendor command: write the job's request payload
 * to adapter flash at the caller-supplied offset.
 *
 * NOTE(review): mangled extraction — interior lines (braces, declarations of
 * offset/length/options/flash_dma/rval, error paths) are missing. Restore
 * from the pristine source before compiling or editing logic.
 */
78 qla4xxx_update_flash(struct bsg_job
*bsg_job
)
80 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
81 struct scsi_qla_host
*ha
= to_qla_host(host
);
82 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
83 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
88 uint8_t *flash
= NULL
;
91 bsg_reply
->reply_payload_rcv_len
= 0;
/* Guard clauses: offline PCI channel or active reset abort the request. */
93 if (unlikely(pci_channel_offline(ha
->pdev
)))
96 if (ql4xxx_reset_active(ha
)) {
97 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
/* Only one flash operation may be active at a time. */
102 if (ha
->flash_state
!= QLFLASH_WAITING
) {
103 ql4_printk(KERN_ERR
, ha
, "%s: another flash operation "
104 "active\n", __func__
);
109 ha
->flash_state
= QLFLASH_WRITING
;
/* Write length from the request payload; offset/options from vendor_cmd[1..2]. */
110 length
= bsg_job
->request_payload
.payload_len
;
111 offset
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
112 options
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2];
114 flash
= dma_alloc_coherent(&ha
->pdev
->dev
, length
, &flash_dma
,
117 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for flash "
/* Stage the user data from the scatterlist into the DMA bounce buffer. */
123 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
124 bsg_job
->request_payload
.sg_cnt
, flash
, length
);
126 rval
= qla4xxx_set_flash(ha
, flash_dma
, offset
, length
, options
);
128 ql4_printk(KERN_ERR
, ha
, "%s: set flash failed\n", __func__
);
129 bsg_reply
->result
= DID_ERROR
<< 16;
132 bsg_reply
->result
= DID_OK
<< 16;
/* Complete the job, free the DMA buffer, and re-arm the flash state. */
134 bsg_job_done(bsg_job
, bsg_reply
->result
,
135 bsg_reply
->reply_payload_rcv_len
);
136 dma_free_coherent(&ha
->pdev
->dev
, length
, flash
, flash_dma
);
138 ha
->flash_state
= QLFLASH_WAITING
;
/*
 * qla4xxx_get_acb_state - BSG vendor command: query the IP state of an
 * address control block (ACB) and copy the MBOX_REG_COUNT-word status
 * into the reply payload.
 *
 * NOTE(review): mangled extraction — interior lines (braces, declarations
 * of acb_idx/ip_idx/rval, unsupported-adapter check body, returns) are
 * missing. Restore from the pristine source before compiling or editing.
 */
143 qla4xxx_get_acb_state(struct bsg_job
*bsg_job
)
145 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
146 struct scsi_qla_host
*ha
= to_qla_host(host
);
147 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
148 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
/* Status buffer sized to the firmware mailbox register file. */
149 uint32_t status
[MBOX_REG_COUNT
];
154 bsg_reply
->reply_payload_rcv_len
= 0;
156 if (unlikely(pci_channel_offline(ha
->pdev
)))
159 /* Only 4022 and above adapters are supported */
163 if (ql4xxx_reset_active(ha
)) {
164 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
/* Reply payload must be large enough to hold the full status array. */
169 if (bsg_job
->reply_payload
.payload_len
< sizeof(status
)) {
170 ql4_printk(KERN_ERR
, ha
, "%s: invalid payload len %d\n",
171 __func__
, bsg_job
->reply_payload
.payload_len
);
/* ACB index and IP index are passed in vendor_cmd[1] and vendor_cmd[2]. */
176 acb_idx
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
177 ip_idx
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2];
179 rval
= qla4xxx_get_ip_state(ha
, acb_idx
, ip_idx
, status
);
181 ql4_printk(KERN_ERR
, ha
, "%s: get ip state failed\n",
183 bsg_reply
->result
= DID_ERROR
<< 16;
/* Success: hand the raw status words back through the scatterlist. */
186 bsg_reply
->reply_payload_rcv_len
=
187 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
188 bsg_job
->reply_payload
.sg_cnt
,
189 status
, sizeof(status
));
190 bsg_reply
->result
= DID_OK
<< 16;
193 bsg_job_done(bsg_job
, bsg_reply
->result
,
194 bsg_reply
->reply_payload_rcv_len
);
/*
 * qla4xxx_read_nvram - BSG vendor command: read a bounded region of adapter
 * NVRAM (40xx-family parts only) into the reply payload.
 *
 * NOTE(review): mangled extraction — interior lines (braces, declarations of
 * offset/len/rval, goto targets/returns) are missing. Restore from the
 * pristine source before compiling or editing logic.
 */
200 qla4xxx_read_nvram(struct bsg_job
*bsg_job
)
202 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
203 struct scsi_qla_host
*ha
= to_qla_host(host
);
204 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
205 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
208 uint32_t total_len
= 0;
209 dma_addr_t nvram_dma
;
210 uint8_t *nvram
= NULL
;
213 bsg_reply
->reply_payload_rcv_len
= 0;
215 if (unlikely(pci_channel_offline(ha
->pdev
)))
218 /* Only 40xx adapters are supported */
219 if (!(is_qla4010(ha
) || is_qla4022(ha
) || is_qla4032(ha
)))
222 if (ql4xxx_reset_active(ha
)) {
223 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
/* NVRAM offset from vendor_cmd[1]; read length from the reply payload size. */
228 offset
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
229 len
= bsg_job
->reply_payload
.payload_len
;
230 total_len
= offset
+ len
;
232 /* total len should not be greater than max NVRAM size */
233 if ((is_qla4010(ha
) && total_len
> QL4010_NVRAM_SIZE
) ||
234 ((is_qla4022(ha
) || is_qla4032(ha
)) &&
235 total_len
> QL40X2_NVRAM_SIZE
)) {
236 ql4_printk(KERN_ERR
, ha
, "%s: offset+len greater than max"
237 " nvram size, offset=%d len=%d\n",
238 __func__
, offset
, len
);
/* DMA-coherent bounce buffer for the firmware NVRAM read. */
242 nvram
= dma_alloc_coherent(&ha
->pdev
->dev
, len
, &nvram_dma
,
245 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for nvram "
251 rval
= qla4xxx_get_nvram(ha
, nvram_dma
, offset
, len
);
253 ql4_printk(KERN_ERR
, ha
, "%s: get nvram failed\n", __func__
);
254 bsg_reply
->result
= DID_ERROR
<< 16;
/* Success: copy the NVRAM contents into the caller's scatterlist. */
257 bsg_reply
->reply_payload_rcv_len
=
258 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
259 bsg_job
->reply_payload
.sg_cnt
,
261 bsg_reply
->result
= DID_OK
<< 16;
264 bsg_job_done(bsg_job
, bsg_reply
->result
,
265 bsg_reply
->reply_payload_rcv_len
);
266 dma_free_coherent(&ha
->pdev
->dev
, len
, nvram
, nvram_dma
);
/*
 * qla4xxx_update_nvram - BSG vendor command: write the request payload into
 * adapter NVRAM (40xx-family parts only) at a caller-supplied offset.
 *
 * NOTE(review): mangled extraction — interior lines (braces, declarations of
 * offset/len/rval, goto targets/returns) are missing. Restore from the
 * pristine source before compiling or editing logic.
 */
272 qla4xxx_update_nvram(struct bsg_job
*bsg_job
)
274 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
275 struct scsi_qla_host
*ha
= to_qla_host(host
);
276 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
277 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
280 uint32_t total_len
= 0;
281 dma_addr_t nvram_dma
;
282 uint8_t *nvram
= NULL
;
285 bsg_reply
->reply_payload_rcv_len
= 0;
287 if (unlikely(pci_channel_offline(ha
->pdev
)))
/* Only 40xx-family adapters support this NVRAM interface. */
290 if (!(is_qla4010(ha
) || is_qla4022(ha
) || is_qla4032(ha
)))
293 if (ql4xxx_reset_active(ha
)) {
294 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
/* Offset from vendor_cmd[1]; write length from the request payload size. */
299 offset
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
300 len
= bsg_job
->request_payload
.payload_len
;
301 total_len
= offset
+ len
;
303 /* total len should not be greater than max NVRAM size */
304 if ((is_qla4010(ha
) && total_len
> QL4010_NVRAM_SIZE
) ||
305 ((is_qla4022(ha
) || is_qla4032(ha
)) &&
306 total_len
> QL40X2_NVRAM_SIZE
)) {
307 ql4_printk(KERN_ERR
, ha
, "%s: offset+len greater than max"
308 " nvram size, offset=%d len=%d\n",
309 __func__
, offset
, len
);
313 nvram
= dma_alloc_coherent(&ha
->pdev
->dev
, len
, &nvram_dma
,
/* NOTE(review): message says "flash" in the NVRAM path — looks copy-pasted;
 * confirm against the pristine source before changing the string. */
316 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for flash "
/* Stage user data from the scatterlist into the DMA bounce buffer. */
322 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
323 bsg_job
->request_payload
.sg_cnt
, nvram
, len
);
325 rval
= qla4xxx_set_nvram(ha
, nvram_dma
, offset
, len
);
327 ql4_printk(KERN_ERR
, ha
, "%s: set nvram failed\n", __func__
);
328 bsg_reply
->result
= DID_ERROR
<< 16;
331 bsg_reply
->result
= DID_OK
<< 16;
333 bsg_job_done(bsg_job
, bsg_reply
->result
,
334 bsg_reply
->reply_payload_rcv_len
);
335 dma_free_coherent(&ha
->pdev
->dev
, len
, nvram
, nvram_dma
);
/*
 * qla4xxx_restore_defaults - BSG vendor command: restore factory defaults
 * for a region selected by vendor_cmd[1], with two sub-fields from
 * vendor_cmd[2..3].
 *
 * NOTE(review): mangled extraction — interior lines (braces, declarations of
 * region/field0/field1/rval, returns) are missing. Restore from the pristine
 * source before compiling or editing logic.
 */
341 qla4xxx_restore_defaults(struct bsg_job
*bsg_job
)
343 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
344 struct scsi_qla_host
*ha
= to_qla_host(host
);
345 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
346 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
352 bsg_reply
->reply_payload_rcv_len
= 0;
354 if (unlikely(pci_channel_offline(ha
->pdev
)))
360 if (ql4xxx_reset_active(ha
)) {
361 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
/* Region selector and its two sub-fields come from vendor_cmd[1..3]. */
366 region
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
367 field0
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2];
368 field1
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[3];
370 rval
= qla4xxx_restore_factory_defaults(ha
, region
, field0
, field1
);
/* NOTE(review): error text says "set nvram failed" on the restore-defaults
 * path — probably copy-pasted from qla4xxx_update_nvram; verify. */
372 ql4_printk(KERN_ERR
, ha
, "%s: set nvram failed\n", __func__
);
373 bsg_reply
->result
= DID_ERROR
<< 16;
376 bsg_reply
->result
= DID_OK
<< 16;
378 bsg_job_done(bsg_job
, bsg_reply
->result
,
379 bsg_reply
->reply_payload_rcv_len
);
/*
 * qla4xxx_bsg_get_acb - BSG vendor command: fetch an address control block
 * (ACB) of the requested type from firmware and return it in the reply
 * payload.
 *
 * NOTE(review): mangled extraction — interior lines (braces, declarations of
 * len/acb/acb_dma/rval, unsupported-adapter check body, returns) are
 * missing. Restore from the pristine source before compiling or editing.
 */
385 qla4xxx_bsg_get_acb(struct bsg_job
*bsg_job
)
387 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
388 struct scsi_qla_host
*ha
= to_qla_host(host
);
389 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
390 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
391 uint32_t acb_type
= 0;
397 bsg_reply
->reply_payload_rcv_len
= 0;
399 if (unlikely(pci_channel_offline(ha
->pdev
)))
402 /* Only 4022 and above adapters are supported */
406 if (ql4xxx_reset_active(ha
)) {
407 ql4_printk(KERN_ERR
, ha
, "%s: reset active\n", __func__
);
/* ACB type from vendor_cmd[1]; buffer length from the reply payload size. */
412 acb_type
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
413 len
= bsg_job
->reply_payload
.payload_len
;
/* The caller's buffer must hold at least one full addr_ctrl_blk. */
414 if (len
< sizeof(struct addr_ctrl_blk
)) {
415 ql4_printk(KERN_ERR
, ha
, "%s: invalid acb len %d\n",
421 acb
= dma_alloc_coherent(&ha
->pdev
->dev
, len
, &acb_dma
, GFP_KERNEL
);
423 ql4_printk(KERN_ERR
, ha
, "%s: dma alloc failed for acb "
429 rval
= qla4xxx_get_acb(ha
, acb_dma
, acb_type
, len
);
431 ql4_printk(KERN_ERR
, ha
, "%s: get acb failed\n", __func__
);
432 bsg_reply
->result
= DID_ERROR
<< 16;
/* Success: copy the ACB into the caller's scatterlist. */
435 bsg_reply
->reply_payload_rcv_len
=
436 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
437 bsg_job
->reply_payload
.sg_cnt
,
439 bsg_reply
->result
= DID_OK
<< 16;
442 bsg_job_done(bsg_job
, bsg_reply
->result
,
443 bsg_reply
->reply_payload_rcv_len
);
444 dma_free_coherent(&ha
->pdev
->dev
, len
, acb
, acb_dma
);
/*
 * ql4xxx_execute_diag_cmd - run a diagnostic mailbox command taken verbatim
 * from vendor_cmd[1..] and append the raw mailbox status registers to the
 * BSG reply so the application can inspect them.
 *
 * NOTE(review): mangled extraction — interior lines (braces, the
 * exit_diag_mem_test label, trailing mbox_cmd[7]/mbox_sts[7] arguments,
 * the &mbox_sts[0] argument to qla4xxx_mailbox_command) are missing.
 * Restore from the pristine source before compiling or editing.
 */
449 static void ql4xxx_execute_diag_cmd(struct bsg_job
*bsg_job
)
451 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
452 struct scsi_qla_host
*ha
= to_qla_host(host
);
453 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
454 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
455 uint8_t *rsp_ptr
= NULL
;
456 uint32_t mbox_cmd
[MBOX_REG_COUNT
];
457 uint32_t mbox_sts
[MBOX_REG_COUNT
];
458 int status
= QLA_ERROR
;
460 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
/* Refuse diagnostics while an adapter reset is pending. */
462 if (test_bit(DPC_RESET_HA
, &ha
->dpc_flags
)) {
463 ql4_printk(KERN_INFO
, ha
, "%s: Adapter reset in progress. Invalid Request\n",
465 bsg_reply
->result
= DID_ERROR
<< 16;
466 goto exit_diag_mem_test
;
469 bsg_reply
->reply_payload_rcv_len
= 0;
/* The application supplies the full mailbox register file in vendor_cmd[1..]. */
470 memcpy(mbox_cmd
, &bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1],
471 sizeof(uint32_t) * MBOX_REG_COUNT
);
473 DEBUG2(ql4_printk(KERN_INFO
, ha
,
474 "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
475 __func__
, mbox_cmd
[0], mbox_cmd
[1], mbox_cmd
[2],
476 mbox_cmd
[3], mbox_cmd
[4], mbox_cmd
[5], mbox_cmd
[6],
479 status
= qla4xxx_mailbox_command(ha
, MBOX_REG_COUNT
, 8, &mbox_cmd
[0],
482 DEBUG2(ql4_printk(KERN_INFO
, ha
,
483 "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
484 __func__
, mbox_sts
[0], mbox_sts
[1], mbox_sts
[2],
485 mbox_sts
[3], mbox_sts
[4], mbox_sts
[5], mbox_sts
[6],
488 if (status
== QLA_SUCCESS
)
489 bsg_reply
->result
= DID_OK
<< 16;
491 bsg_reply
->result
= DID_ERROR
<< 16;
493 /* Send mbox_sts to application */
494 bsg_job
->reply_len
= sizeof(struct iscsi_bsg_reply
) + sizeof(mbox_sts
);
/* Raw mailbox status is appended directly after the fixed reply struct. */
495 rsp_ptr
= ((uint8_t *)bsg_reply
) + sizeof(struct iscsi_bsg_reply
);
496 memcpy(rsp_ptr
, mbox_sts
, sizeof(mbox_sts
));
499 DEBUG2(ql4_printk(KERN_INFO
, ha
,
500 "%s: bsg_reply->result = x%x, status = %s\n",
501 __func__
, bsg_reply
->result
, STATUS(status
)));
503 bsg_job_done(bsg_job
, bsg_reply
->result
,
504 bsg_reply
->reply_payload_rcv_len
);
/*
 * qla4_83xx_wait_for_loopback_config_comp - block until the firmware signals
 * IDC completion (with an optional extended timeout) and, afterwards, a
 * LINK UP notification, following a loopback port-config change.
 *
 * NOTE(review): mangled extraction — the second parameter (presumably a
 * wait-for-link flag, given the conditional link_up_comp wait below — TODO
 * confirm), braces, failure status assignments, and the return statement
 * are missing. Restore from the pristine source before editing.
 */
507 static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host
*ha
,
510 int status
= QLA_SUCCESS
;
/* First wait: IDC completion within IDC_COMP_TOV seconds. */
512 if (!wait_for_completion_timeout(&ha
->idc_comp
, (IDC_COMP_TOV
* HZ
))) {
513 ql4_printk(KERN_INFO
, ha
, "%s: IDC Complete notification not received, Waiting for another %d timeout",
514 __func__
, ha
->idc_extend_tmo
);
/* Firmware may have requested an extended timeout; honor it if set. */
515 if (ha
->idc_extend_tmo
) {
516 if (!wait_for_completion_timeout(&ha
->idc_comp
,
517 (ha
->idc_extend_tmo
* HZ
))) {
/* Give up: clear both notification expectations before aborting. */
518 ha
->notify_idc_comp
= 0;
519 ha
->notify_link_up_comp
= 0;
520 ql4_printk(KERN_WARNING
, ha
, "%s: Aborting: IDC Complete notification not received",
525 DEBUG2(ql4_printk(KERN_INFO
, ha
,
526 "%s: IDC Complete notification received\n",
531 DEBUG2(ql4_printk(KERN_INFO
, ha
,
532 "%s: IDC Complete notification received\n",
535 ha
->notify_idc_comp
= 0;
/* Second wait: LINK UP notification after the config change. */
538 if (!wait_for_completion_timeout(&ha
->link_up_comp
,
539 (IDC_COMP_TOV
* HZ
))) {
540 ha
->notify_link_up_comp
= 0;
541 ql4_printk(KERN_WARNING
, ha
, "%s: Aborting: LINK UP notification not received",
546 DEBUG2(ql4_printk(KERN_INFO
, ha
,
547 "%s: LINK UP notification received\n",
550 ha
->notify_link_up_comp
= 0;
/*
 * qla4_83xx_pre_loopback_config - prepare the 83xx port for a loopback
 * diagnostic: enable the requested internal/external loopback bit, disable
 * DCBX, and push the new port configuration to firmware.
 *
 * NOTE(review): mangled extraction — the mbox_cmd parameter declaration,
 * the local 'config' declaration, braces, some error-status assignments and
 * the final return are missing. Restore from the pristine source first.
 */
557 static int qla4_83xx_pre_loopback_config(struct scsi_qla_host
*ha
,
561 int status
= QLA_SUCCESS
;
563 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
565 status
= qla4_83xx_get_port_config(ha
, &config
);
566 if (status
!= QLA_SUCCESS
)
567 goto exit_pre_loopback_config
;
569 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: Default port config=%08X\n",
/* Reject the request if a loopback mode is already enabled in the config. */
572 if ((config
& ENABLE_INTERNAL_LOOPBACK
) ||
573 (config
& ENABLE_EXTERNAL_LOOPBACK
)) {
574 ql4_printk(KERN_INFO
, ha
, "%s: Loopback diagnostics already in progress. Invalid request\n",
576 goto exit_pre_loopback_config
;
/* vendor_cmd[1] (staged into mbox_cmd[1]) selects which loopback to enable. */
579 if (mbox_cmd
[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK
)
580 config
|= ENABLE_INTERNAL_LOOPBACK
;
582 if (mbox_cmd
[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK
)
583 config
|= ENABLE_EXTERNAL_LOOPBACK
;
/* DCBX must be off while loopback diagnostics run. */
585 config
&= ~ENABLE_DCBX
;
587 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: New port config=%08X\n",
/* Arm the IDC-complete and link-up notifications before the config write. */
590 ha
->notify_idc_comp
= 1;
591 ha
->notify_link_up_comp
= 1;
593 /* get the link state */
594 qla4xxx_get_firmware_state(ha
);
596 status
= qla4_83xx_set_port_config(ha
, &config
);
597 if (status
!= QLA_SUCCESS
) {
/* Config write failed: disarm the notifications we just armed. */
598 ha
->notify_idc_comp
= 0;
599 ha
->notify_link_up_comp
= 0;
600 goto exit_pre_loopback_config
;
602 exit_pre_loopback_config
:
603 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: status = %s\n", __func__
,
/*
 * qla4_83xx_post_loopback_config - undo the loopback setup after a 83xx
 * loopback diagnostic: clear the loopback bit that was enabled, re-enable
 * DCBX, and restore the port configuration; schedule an adapter reset if
 * the restore fails.
 *
 * NOTE(review): mangled extraction — the mbox_cmd parameter declaration,
 * the local 'config' declaration, braces and the final return are missing.
 * Restore from the pristine source before compiling or editing.
 */
608 static int qla4_83xx_post_loopback_config(struct scsi_qla_host
*ha
,
611 int status
= QLA_SUCCESS
;
614 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
616 status
= qla4_83xx_get_port_config(ha
, &config
);
617 if (status
!= QLA_SUCCESS
)
618 goto exit_post_loopback_config
;
620 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: port config=%08X\n", __func__
,
/* Clear whichever loopback bit the pre-config step set (from mbox_cmd[1]). */
623 if (mbox_cmd
[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK
)
624 config
&= ~ENABLE_INTERNAL_LOOPBACK
;
625 else if (mbox_cmd
[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK
)
626 config
&= ~ENABLE_EXTERNAL_LOOPBACK
;
/* Re-enable DCBX now that the diagnostic is done. */
628 config
|= ENABLE_DCBX
;
630 DEBUG2(ql4_printk(KERN_INFO
, ha
,
631 "%s: Restore default port config=%08X\n", __func__
,
634 ha
->notify_idc_comp
= 1;
/* Only expect a LINK UP notification if firmware reports the link is up. */
635 if (ha
->addl_fw_state
& FW_ADDSTATE_LINK_UP
)
636 ha
->notify_link_up_comp
= 1;
638 status
= qla4_83xx_set_port_config(ha
, &config
);
639 if (status
!= QLA_SUCCESS
) {
/* Restore failed: force a full adapter reset to recover the port. */
640 ql4_printk(KERN_INFO
, ha
, "%s: Scheduling adapter reset\n",
642 set_bit(DPC_RESET_HA
, &ha
->dpc_flags
);
643 clear_bit(AF_LOOPBACK
, &ha
->flags
);
644 goto exit_post_loopback_config
;
647 exit_post_loopback_config
:
648 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: status = %s\n", __func__
,
/*
 * qla4xxx_execute_diag_loopback_cmd - run a loopback diagnostic mailbox
 * command. On 8032/8042 parts this brackets the mailbox command with
 * pre/post port-reconfiguration and waits for the firmware IDC / LINK UP
 * notifications; the raw mailbox status is appended to the BSG reply.
 *
 * NOTE(review): mangled extraction — braces, the wait_for_link arguments to
 * qla4_83xx_wait_for_loopback_config_comp, the &mbox_sts[0] argument to
 * qla4xxx_mailbox_command, the exit_loopback_cmd label and several goto
 * paths are missing. Restore from the pristine source before editing.
 */
653 static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job
*bsg_job
)
655 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
656 struct scsi_qla_host
*ha
= to_qla_host(host
);
657 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
658 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
659 uint8_t *rsp_ptr
= NULL
;
660 uint32_t mbox_cmd
[MBOX_REG_COUNT
];
661 uint32_t mbox_sts
[MBOX_REG_COUNT
];
662 int wait_for_link
= 1;
663 int status
= QLA_ERROR
;
665 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
667 bsg_reply
->reply_payload_rcv_len
= 0;
/* Only one loopback diagnostic may run at a time (AF_LOOPBACK flag). */
669 if (test_bit(AF_LOOPBACK
, &ha
->flags
)) {
670 ql4_printk(KERN_INFO
, ha
, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
672 bsg_reply
->result
= DID_ERROR
<< 16;
673 goto exit_loopback_cmd
;
/* Refuse while an adapter reset is pending. */
676 if (test_bit(DPC_RESET_HA
, &ha
->dpc_flags
)) {
677 ql4_printk(KERN_INFO
, ha
, "%s: Adapter reset in progress. Invalid Request\n",
679 bsg_reply
->result
= DID_ERROR
<< 16;
680 goto exit_loopback_cmd
;
/* The application supplies the mailbox register file in vendor_cmd[1..]. */
683 memcpy(mbox_cmd
, &bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1],
684 sizeof(uint32_t) * MBOX_REG_COUNT
);
/* 8032/8042: reconfigure the port for loopback before the command. */
686 if (is_qla8032(ha
) || is_qla8042(ha
)) {
687 status
= qla4_83xx_pre_loopback_config(ha
, mbox_cmd
);
688 if (status
!= QLA_SUCCESS
) {
689 bsg_reply
->result
= DID_ERROR
<< 16;
690 goto exit_loopback_cmd
;
693 status
= qla4_83xx_wait_for_loopback_config_comp(ha
,
695 if (status
!= QLA_SUCCESS
) {
696 bsg_reply
->result
= DID_TIME_OUT
<< 16;
701 DEBUG2(ql4_printk(KERN_INFO
, ha
,
702 "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
703 __func__
, mbox_cmd
[0], mbox_cmd
[1], mbox_cmd
[2],
704 mbox_cmd
[3], mbox_cmd
[4], mbox_cmd
[5], mbox_cmd
[6],
707 status
= qla4xxx_mailbox_command(ha
, MBOX_REG_COUNT
, 8, &mbox_cmd
[0],
710 if (status
== QLA_SUCCESS
)
711 bsg_reply
->result
= DID_OK
<< 16;
713 bsg_reply
->result
= DID_ERROR
<< 16;
715 DEBUG2(ql4_printk(KERN_INFO
, ha
,
716 "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
717 __func__
, mbox_sts
[0], mbox_sts
[1], mbox_sts
[2],
718 mbox_sts
[3], mbox_sts
[4], mbox_sts
[5], mbox_sts
[6],
721 /* Send mbox_sts to application */
722 bsg_job
->reply_len
= sizeof(struct iscsi_bsg_reply
) + sizeof(mbox_sts
);
/* Raw mailbox status appended directly after the fixed reply struct. */
723 rsp_ptr
= ((uint8_t *)bsg_reply
) + sizeof(struct iscsi_bsg_reply
);
724 memcpy(rsp_ptr
, mbox_sts
, sizeof(mbox_sts
));
/* 8032/8042: restore the normal port configuration afterwards. */
726 if (is_qla8032(ha
) || is_qla8042(ha
)) {
727 status
= qla4_83xx_post_loopback_config(ha
, mbox_cmd
);
728 if (status
!= QLA_SUCCESS
) {
729 bsg_reply
->result
= DID_ERROR
<< 16;
730 goto exit_loopback_cmd
;
733 /* for pre_loopback_config() wait for LINK UP only
734 * if PHY LINK is UP */
735 if (!(ha
->addl_fw_state
& FW_ADDSTATE_LINK_UP
))
738 status
= qla4_83xx_wait_for_loopback_config_comp(ha
,
740 if (status
!= QLA_SUCCESS
) {
741 bsg_reply
->result
= DID_TIME_OUT
<< 16;
742 goto exit_loopback_cmd
;
746 DEBUG2(ql4_printk(KERN_INFO
, ha
,
747 "%s: bsg_reply->result = x%x, status = %s\n",
748 __func__
, bsg_reply
->result
, STATUS(status
)));
749 bsg_job_done(bsg_job
, bsg_reply
->result
,
750 bsg_reply
->reply_payload_rcv_len
);
/*
 * qla4xxx_execute_diag_test - dispatch a diagnostic BSG request: memory/
 * flash/NVRAM self-tests and LED config go to ql4xxx_execute_diag_cmd(),
 * loopback tests go to qla4xxx_execute_diag_loopback_cmd(), anything else
 * is rejected with an error message.
 *
 * NOTE(review): mangled extraction — braces, the local diag_cmd/rval
 * declarations, break statements and return paths are missing. Restore
 * from the pristine source before compiling or editing.
 */
753 static int qla4xxx_execute_diag_test(struct bsg_job
*bsg_job
)
755 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
756 struct scsi_qla_host
*ha
= to_qla_host(host
);
757 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
761 DEBUG2(ql4_printk(KERN_INFO
, ha
, "%s: in\n", __func__
));
/* vendor_cmd[1] carries the mailbox opcode; vendor_cmd[2] the sub-test. */
763 diag_cmd
= bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[1];
764 if (diag_cmd
== MBOX_CMD_DIAG_TEST
) {
765 switch (bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2]) {
766 case QL_DIAG_CMD_TEST_DDR_SIZE
:
767 case QL_DIAG_CMD_TEST_DDR_RW
:
768 case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW
:
769 case QL_DIAG_CMD_TEST_NVRAM
:
770 case QL_DIAG_CMD_TEST_FLASH_ROM
:
771 case QL_DIAG_CMD_TEST_DMA_XFER
:
772 case QL_DIAG_CMD_SELF_DDR_RW
:
773 case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW
:
774 /* Execute diag test for adapter RAM/FLASH */
775 ql4xxx_execute_diag_cmd(bsg_job
);
776 /* Always return success as we want to sent bsg_reply
781 case QL_DIAG_CMD_TEST_INT_LOOPBACK
:
782 case QL_DIAG_CMD_TEST_EXT_LOOPBACK
:
783 /* Execute diag test for Network */
784 qla4xxx_execute_diag_loopback_cmd(bsg_job
);
785 /* Always return success as we want to sent bsg_reply
/* Unknown sub-test: log and fall through to the error return (missing here). */
790 ql4_printk(KERN_ERR
, ha
, "%s: Invalid diag test: 0x%x\n",
792 bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[2]);
/* LED get/set configs are plain mailbox commands — reuse the diag path. */
794 } else if ((diag_cmd
== MBOX_CMD_SET_LED_CONFIG
) ||
795 (diag_cmd
== MBOX_CMD_GET_LED_CONFIG
)) {
796 ql4xxx_execute_diag_cmd(bsg_job
);
799 ql4_printk(KERN_ERR
, ha
, "%s: Invalid diag cmd: 0x%x\n",
/*
 * NOTE(review): mangled extraction — the kernel-doc comment opener, braces,
 * case break/return structure and the final return are missing. Restore
 * from the pristine source before compiling or editing.
 */
807 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
808 * @job: iscsi_bsg_job to handle
810 int qla4xxx_process_vendor_specific(struct bsg_job
*bsg_job
)
812 struct iscsi_bsg_reply
*bsg_reply
= bsg_job
->reply
;
813 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
814 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
815 struct scsi_qla_host
*ha
= to_qla_host(host
);
/* Dispatch on the vendor sub-command in vendor_cmd[0]. */
817 switch (bsg_req
->rqst_data
.h_vendor
.vendor_cmd
[0]) {
818 case QLISCSI_VND_READ_FLASH
:
819 return qla4xxx_read_flash(bsg_job
);
821 case QLISCSI_VND_UPDATE_FLASH
:
822 return qla4xxx_update_flash(bsg_job
);
824 case QLISCSI_VND_GET_ACB_STATE
:
825 return qla4xxx_get_acb_state(bsg_job
);
827 case QLISCSI_VND_READ_NVRAM
:
828 return qla4xxx_read_nvram(bsg_job
);
830 case QLISCSI_VND_UPDATE_NVRAM
:
831 return qla4xxx_update_nvram(bsg_job
);
833 case QLISCSI_VND_RESTORE_DEFAULTS
:
834 return qla4xxx_restore_defaults(bsg_job
);
836 case QLISCSI_VND_GET_ACB
:
837 return qla4xxx_bsg_get_acb(bsg_job
);
839 case QLISCSI_VND_DIAG_TEST
:
840 return qla4xxx_execute_diag_test(bsg_job
);
/* Unknown vendor command: report the error through the BSG reply. */
843 ql4_printk(KERN_ERR
, ha
, "%s: invalid BSG vendor command: "
844 "0x%x\n", __func__
, bsg_req
->msgcode
);
845 bsg_reply
->result
= (DID_ERROR
<< 16);
846 bsg_reply
->reply_payload_rcv_len
= 0;
847 bsg_job_done(bsg_job
, bsg_reply
->result
,
848 bsg_reply
->reply_payload_rcv_len
);
/*
 * NOTE(review): mangled extraction — the kernel-doc opener, braces, the
 * default-case structure and the function's final return are missing (the
 * text is truncated at the error printk). Restore from the pristine source
 * before compiling or editing.
 */
854 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
855 * @job: iscsi_bsg_job to handle
857 int qla4xxx_bsg_request(struct bsg_job
*bsg_job
)
859 struct iscsi_bsg_request
*bsg_req
= bsg_job
->request
;
860 struct Scsi_Host
*host
= iscsi_job_to_shost(bsg_job
);
861 struct scsi_qla_host
*ha
= to_qla_host(host
);
/* Only host-level vendor commands are supported by this driver. */
863 switch (bsg_req
->msgcode
) {
864 case ISCSI_BSG_HST_VENDOR
:
865 return qla4xxx_process_vendor_specific(bsg_job
);
868 ql4_printk(KERN_ERR
, ha
, "%s: invalid BSG command: 0x%x\n",
869 __func__
, bsg_req
->msgcode
);