drm/client: Fix drm client endless Kconfig loop
[drm/drm-misc.git] / drivers / scsi / qla4xxx / ql4_bsg.c
blobc447a9d598a172165106c2a703550027e9b8d8a3
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic iSCSI HBA Driver
4 * Copyright (c) 2011-2013 QLogic Corporation
5 */
7 #include "ql4_def.h"
8 #include "ql4_glbl.h"
9 #include "ql4_bsg.h"
11 static int
12 qla4xxx_read_flash(struct bsg_job *bsg_job)
14 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
15 struct scsi_qla_host *ha = to_qla_host(host);
16 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
17 struct iscsi_bsg_request *bsg_req = bsg_job->request;
18 uint32_t offset = 0;
19 uint32_t length = 0;
20 dma_addr_t flash_dma;
21 uint8_t *flash = NULL;
22 int rval = -EINVAL;
24 bsg_reply->reply_payload_rcv_len = 0;
26 if (unlikely(pci_channel_offline(ha->pdev)))
27 goto leave;
29 if (ql4xxx_reset_active(ha)) {
30 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
31 rval = -EBUSY;
32 goto leave;
35 if (ha->flash_state != QLFLASH_WAITING) {
36 ql4_printk(KERN_ERR, ha, "%s: another flash operation "
37 "active\n", __func__);
38 rval = -EBUSY;
39 goto leave;
42 ha->flash_state = QLFLASH_READING;
43 offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
44 length = bsg_job->reply_payload.payload_len;
46 flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
47 GFP_KERNEL);
48 if (!flash) {
49 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
50 "data\n", __func__);
51 rval = -ENOMEM;
52 goto leave;
55 rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
56 if (rval) {
57 ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
58 bsg_reply->result = DID_ERROR << 16;
59 rval = -EIO;
60 } else {
61 bsg_reply->reply_payload_rcv_len =
62 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
63 bsg_job->reply_payload.sg_cnt,
64 flash, length);
65 bsg_reply->result = DID_OK << 16;
68 bsg_job_done(bsg_job, bsg_reply->result,
69 bsg_reply->reply_payload_rcv_len);
70 dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
71 leave:
72 ha->flash_state = QLFLASH_WAITING;
73 return rval;
76 static int
77 qla4xxx_update_flash(struct bsg_job *bsg_job)
79 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
80 struct scsi_qla_host *ha = to_qla_host(host);
81 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
82 struct iscsi_bsg_request *bsg_req = bsg_job->request;
83 uint32_t length = 0;
84 uint32_t offset = 0;
85 uint32_t options = 0;
86 dma_addr_t flash_dma;
87 uint8_t *flash = NULL;
88 int rval = -EINVAL;
90 bsg_reply->reply_payload_rcv_len = 0;
92 if (unlikely(pci_channel_offline(ha->pdev)))
93 goto leave;
95 if (ql4xxx_reset_active(ha)) {
96 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
97 rval = -EBUSY;
98 goto leave;
101 if (ha->flash_state != QLFLASH_WAITING) {
102 ql4_printk(KERN_ERR, ha, "%s: another flash operation "
103 "active\n", __func__);
104 rval = -EBUSY;
105 goto leave;
108 ha->flash_state = QLFLASH_WRITING;
109 length = bsg_job->request_payload.payload_len;
110 offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
111 options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
113 flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
114 GFP_KERNEL);
115 if (!flash) {
116 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
117 "data\n", __func__);
118 rval = -ENOMEM;
119 goto leave;
122 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
123 bsg_job->request_payload.sg_cnt, flash, length);
125 rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
126 if (rval) {
127 ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
128 bsg_reply->result = DID_ERROR << 16;
129 rval = -EIO;
130 } else
131 bsg_reply->result = DID_OK << 16;
133 bsg_job_done(bsg_job, bsg_reply->result,
134 bsg_reply->reply_payload_rcv_len);
135 dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
136 leave:
137 ha->flash_state = QLFLASH_WAITING;
138 return rval;
/*
 * qla4xxx_get_acb_state - BSG vendor command: query the state of one IP
 * @bsg_job: iscsi bsg job; vendor_cmd[1] = ACB index, vendor_cmd[2] = IP index
 *
 * Issues a get-IP-state mailbox request and copies the raw mailbox status
 * registers into the reply scatter-gather list, then completes the job via
 * bsg_job_done().  Not supported on 4010 adapters.
 *
 * Return: 0 on success, negative errno on failure.  On early errors the job
 * is not completed here; the caller's error return path handles it.
 */
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	/* reply buffer must be able to hold the full mailbox status array */
	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	/* ACB and IP indices are supplied by userspace in the vendor cmd */
	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
198 static int
199 qla4xxx_read_nvram(struct bsg_job *bsg_job)
201 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
202 struct scsi_qla_host *ha = to_qla_host(host);
203 struct iscsi_bsg_request *bsg_req = bsg_job->request;
204 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
205 uint32_t offset = 0;
206 uint32_t len = 0;
207 uint32_t total_len = 0;
208 dma_addr_t nvram_dma;
209 uint8_t *nvram = NULL;
210 int rval = -EINVAL;
212 bsg_reply->reply_payload_rcv_len = 0;
214 if (unlikely(pci_channel_offline(ha->pdev)))
215 goto leave;
217 /* Only 40xx adapters are supported */
218 if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
219 goto leave;
221 if (ql4xxx_reset_active(ha)) {
222 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
223 rval = -EBUSY;
224 goto leave;
227 offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
228 len = bsg_job->reply_payload.payload_len;
229 total_len = offset + len;
231 /* total len should not be greater than max NVRAM size */
232 if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
233 ((is_qla4022(ha) || is_qla4032(ha)) &&
234 total_len > QL40X2_NVRAM_SIZE)) {
235 ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
236 " nvram size, offset=%d len=%d\n",
237 __func__, offset, len);
238 goto leave;
241 nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
242 GFP_KERNEL);
243 if (!nvram) {
244 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
245 "data\n", __func__);
246 rval = -ENOMEM;
247 goto leave;
250 rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
251 if (rval) {
252 ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
253 bsg_reply->result = DID_ERROR << 16;
254 rval = -EIO;
255 } else {
256 bsg_reply->reply_payload_rcv_len =
257 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
258 bsg_job->reply_payload.sg_cnt,
259 nvram, len);
260 bsg_reply->result = DID_OK << 16;
263 bsg_job_done(bsg_job, bsg_reply->result,
264 bsg_reply->reply_payload_rcv_len);
265 dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
266 leave:
267 return rval;
270 static int
271 qla4xxx_update_nvram(struct bsg_job *bsg_job)
273 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
274 struct scsi_qla_host *ha = to_qla_host(host);
275 struct iscsi_bsg_request *bsg_req = bsg_job->request;
276 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
277 uint32_t offset = 0;
278 uint32_t len = 0;
279 uint32_t total_len = 0;
280 dma_addr_t nvram_dma;
281 uint8_t *nvram = NULL;
282 int rval = -EINVAL;
284 bsg_reply->reply_payload_rcv_len = 0;
286 if (unlikely(pci_channel_offline(ha->pdev)))
287 goto leave;
289 if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
290 goto leave;
292 if (ql4xxx_reset_active(ha)) {
293 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
294 rval = -EBUSY;
295 goto leave;
298 offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
299 len = bsg_job->request_payload.payload_len;
300 total_len = offset + len;
302 /* total len should not be greater than max NVRAM size */
303 if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
304 ((is_qla4022(ha) || is_qla4032(ha)) &&
305 total_len > QL40X2_NVRAM_SIZE)) {
306 ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
307 " nvram size, offset=%d len=%d\n",
308 __func__, offset, len);
309 goto leave;
312 nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
313 GFP_KERNEL);
314 if (!nvram) {
315 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
316 "data\n", __func__);
317 rval = -ENOMEM;
318 goto leave;
321 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
322 bsg_job->request_payload.sg_cnt, nvram, len);
324 rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
325 if (rval) {
326 ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
327 bsg_reply->result = DID_ERROR << 16;
328 rval = -EIO;
329 } else
330 bsg_reply->result = DID_OK << 16;
332 bsg_job_done(bsg_job, bsg_reply->result,
333 bsg_reply->reply_payload_rcv_len);
334 dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
335 leave:
336 return rval;
339 static int
340 qla4xxx_restore_defaults(struct bsg_job *bsg_job)
342 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
343 struct scsi_qla_host *ha = to_qla_host(host);
344 struct iscsi_bsg_request *bsg_req = bsg_job->request;
345 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
346 uint32_t region = 0;
347 uint32_t field0 = 0;
348 uint32_t field1 = 0;
349 int rval = -EINVAL;
351 bsg_reply->reply_payload_rcv_len = 0;
353 if (unlikely(pci_channel_offline(ha->pdev)))
354 goto leave;
356 if (is_qla4010(ha))
357 goto leave;
359 if (ql4xxx_reset_active(ha)) {
360 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
361 rval = -EBUSY;
362 goto leave;
365 region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
366 field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
367 field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
369 rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
370 if (rval) {
371 ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
372 bsg_reply->result = DID_ERROR << 16;
373 rval = -EIO;
374 } else
375 bsg_reply->result = DID_OK << 16;
377 bsg_job_done(bsg_job, bsg_reply->result,
378 bsg_reply->reply_payload_rcv_len);
379 leave:
380 return rval;
/*
 * qla4xxx_bsg_get_acb - BSG vendor command: fetch an Address Control Block
 * @bsg_job: iscsi bsg job; vendor_cmd[1] = ACB type, reply payload length =
 *           caller-supplied buffer size
 *
 * Reads the ACB of the requested type from the adapter into a DMA buffer
 * and copies it to the reply scatter-gather list, then completes the job
 * via bsg_job_done().  Not supported on 4010 adapters.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t acb_type = 0;
	uint32_t len = 0;
	dma_addr_t acb_dma;
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	/* caller's buffer must hold at least one full ACB */
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}
448 static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
450 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
451 struct scsi_qla_host *ha = to_qla_host(host);
452 struct iscsi_bsg_request *bsg_req = bsg_job->request;
453 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
454 uint8_t *rsp_ptr = NULL;
455 uint32_t mbox_cmd[MBOX_REG_COUNT];
456 uint32_t mbox_sts[MBOX_REG_COUNT];
457 int status = QLA_ERROR;
459 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
461 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
462 ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
463 __func__);
464 bsg_reply->result = DID_ERROR << 16;
465 goto exit_diag_mem_test;
468 bsg_reply->reply_payload_rcv_len = 0;
469 memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
470 sizeof(uint32_t) * MBOX_REG_COUNT);
472 DEBUG2(ql4_printk(KERN_INFO, ha,
473 "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
474 __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
475 mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
476 mbox_cmd[7]));
478 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
479 &mbox_sts[0]);
481 DEBUG2(ql4_printk(KERN_INFO, ha,
482 "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
483 __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
484 mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
485 mbox_sts[7]));
487 if (status == QLA_SUCCESS)
488 bsg_reply->result = DID_OK << 16;
489 else
490 bsg_reply->result = DID_ERROR << 16;
492 /* Send mbox_sts to application */
493 bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
494 rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
495 memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
497 exit_diag_mem_test:
498 DEBUG2(ql4_printk(KERN_INFO, ha,
499 "%s: bsg_reply->result = x%x, status = %s\n",
500 __func__, bsg_reply->result, STATUS(status)));
502 bsg_job_done(bsg_job, bsg_reply->result,
503 bsg_reply->reply_payload_rcv_len);
/*
 * qla4_83xx_wait_for_loopback_config_comp - wait for port config completion
 * @ha: adapter
 * @wait_for_link: non-zero to additionally wait for a LINK UP notification
 *
 * Waits (with timeout) for the IDC-complete notification that follows a
 * loopback port reconfiguration, optionally extending the wait by
 * ha->idc_extend_tmo, and then optionally waits for link-up.  Clears the
 * notify_* request flags on the paths it owns.
 *
 * Return: QLA_SUCCESS if the expected notifications arrived, QLA_ERROR on
 * timeout.
 */
static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
						   int wait_for_link)
{
	int status = QLA_SUCCESS;

	if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
		/* NOTE(review): this logs idc_extend_tmo even when it is 0,
		 * i.e. when no extended wait will actually happen — confirm
		 * intended. */
		ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout",
			   __func__, ha->idc_extend_tmo);
		if (ha->idc_extend_tmo) {
			/* second chance: firmware asked for more time */
			if (!wait_for_completion_timeout(&ha->idc_comp,
							 (ha->idc_extend_tmo * HZ))) {
				ha->notify_idc_comp = 0;
				ha->notify_link_up_comp = 0;
				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
					   __func__);
				status = QLA_ERROR;
				goto exit_wait;
			} else {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: IDC Complete notification received\n",
						  __func__));
			}
		}
	} else {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: IDC Complete notification received\n",
				  __func__));
	}
	/* IDC phase done (or not requested further) — drop the request flag */
	ha->notify_idc_comp = 0;

	if (wait_for_link) {
		if (!wait_for_completion_timeout(&ha->link_up_comp,
						 (IDC_COMP_TOV * HZ))) {
			ha->notify_link_up_comp = 0;
			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
				   __func__);
			status = QLA_ERROR;
			goto exit_wait;
		} else {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: LINK UP notification received\n",
					  __func__));
		}
		ha->notify_link_up_comp = 0;
	}

exit_wait:
	return status;
}
/*
 * qla4_83xx_pre_loopback_config - put the 83xx port into loopback mode
 * @ha: adapter
 * @mbox_cmd: diag mailbox command; mbox_cmd[1] selects internal or external
 *            loopback
 *
 * Reads the current port configuration, sets the requested loopback enable
 * bit, clears DCBX and writes the configuration back.  Arms the IDC and
 * link-up notification flags before the write so the caller can wait for
 * the reconfiguration to complete.
 *
 * Return: QLA_SUCCESS or QLA_ERROR (from the get/set port config calls).
 */
static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
					 uint32_t *mbox_cmd)
{
	uint32_t config = 0;
	int status = QLA_SUCCESS;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_pre_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
			  __func__, config));

	/* refuse to stack a second loopback session on a running one.
	 * NOTE(review): status is still QLA_SUCCESS on this path, so the
	 * caller proceeds as if configuration succeeded — confirm intended. */
	if ((config & ENABLE_INTERNAL_LOOPBACK) ||
	    (config & ENABLE_EXTERNAL_LOOPBACK)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n",
			   __func__);
		goto exit_pre_loopback_config;
	}

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config |= ENABLE_INTERNAL_LOOPBACK;

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config |= ENABLE_EXTERNAL_LOOPBACK;

	/* DCBX is disabled while the port runs in loopback */
	config &= ~ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
			  __func__, config));

	/* arm notifications before triggering the config change */
	ha->notify_idc_comp = 1;
	ha->notify_link_up_comp = 1;

	/* get the link state */
	qla4xxx_get_firmware_state(ha);

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		/* config write failed — no notifications will arrive */
		ha->notify_idc_comp = 0;
		ha->notify_link_up_comp = 0;
		goto exit_pre_loopback_config;
	}

exit_pre_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}
/*
 * qla4_83xx_post_loopback_config - restore port config after loopback test
 * @ha: adapter
 * @mbox_cmd: diag mailbox command used for the test; mbox_cmd[1] tells which
 *            loopback bit to clear
 *
 * Reads the current port configuration, clears the loopback enable bit set
 * by qla4_83xx_pre_loopback_config(), re-enables DCBX and writes the
 * configuration back.  If the restore fails, schedules an adapter reset to
 * recover.  Arms IDC (and, if the link is up, link-up) notifications so the
 * caller can wait for completion.
 *
 * Return: QLA_SUCCESS or QLA_ERROR.
 */
static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
					  uint32_t *mbox_cmd)
{
	int status = QLA_SUCCESS;
	uint32_t config = 0;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_post_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
			  config));

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config &= ~ENABLE_INTERNAL_LOOPBACK;
	else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config &= ~ENABLE_EXTERNAL_LOOPBACK;

	/* restore normal operation: DCBX back on */
	config |= ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Restore default port config=%08X\n", __func__,
			  config));

	/* only expect a LINK UP notification if the PHY link is up */
	ha->notify_idc_comp = 1;
	if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
		ha->notify_link_up_comp = 1;

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		/* could not leave loopback cleanly — force a full reset */
		ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
			   __func__);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		clear_bit(AF_LOOPBACK, &ha->flags);
		goto exit_post_loopback_config;
	}

exit_post_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}
/*
 * qla4xxx_execute_diag_loopback_cmd - run a loopback diag test for BSG
 * @bsg_job: iscsi bsg job; vendor_cmd[1..] carries the raw mailbox command
 *
 * For 8032/8042 adapters the port is first reconfigured into loopback mode
 * (and restored afterwards), waiting for IDC/link notifications around each
 * reconfiguration.  The diag mailbox command itself is then issued and the
 * mailbox status registers are appended to the bsg reply.  The job is
 * always completed via bsg_job_done(); success/failure is reported through
 * bsg_reply->result.
 */
static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int wait_for_link = 1;
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	bsg_reply->reply_payload_rcv_len = 0;

	/* a loopback test is already running */
	if (test_bit(AF_LOOPBACK, &ha->flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	/* 83xx-class adapters need the port switched into loopback first */
	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			/* config was pushed; still try to restore the port */
			bsg_reply->result = DID_TIME_OUT << 16;
			goto restore;
		}
	}

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
restore:
	/* undo the loopback port configuration on 83xx-class adapters */
	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		/* for pre_loopback_config() wait for LINK UP only
		 * if PHY LINK is UP */
		if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
			wait_for_link = 0;

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto exit_loopback_cmd;
		}
	}
exit_loopback_cmd:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));
	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}
752 static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
754 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
755 struct scsi_qla_host *ha = to_qla_host(host);
756 struct iscsi_bsg_request *bsg_req = bsg_job->request;
757 uint32_t diag_cmd;
758 int rval = -EINVAL;
760 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
762 diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
763 if (diag_cmd == MBOX_CMD_DIAG_TEST) {
764 switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
765 case QL_DIAG_CMD_TEST_DDR_SIZE:
766 case QL_DIAG_CMD_TEST_DDR_RW:
767 case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
768 case QL_DIAG_CMD_TEST_NVRAM:
769 case QL_DIAG_CMD_TEST_FLASH_ROM:
770 case QL_DIAG_CMD_TEST_DMA_XFER:
771 case QL_DIAG_CMD_SELF_DDR_RW:
772 case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
773 /* Execute diag test for adapter RAM/FLASH */
774 ql4xxx_execute_diag_cmd(bsg_job);
775 /* Always return success as we want to sent bsg_reply
776 * to Application */
777 rval = QLA_SUCCESS;
778 break;
780 case QL_DIAG_CMD_TEST_INT_LOOPBACK:
781 case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
782 /* Execute diag test for Network */
783 qla4xxx_execute_diag_loopback_cmd(bsg_job);
784 /* Always return success as we want to sent bsg_reply
785 * to Application */
786 rval = QLA_SUCCESS;
787 break;
788 default:
789 ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
790 __func__,
791 bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
793 } else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
794 (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
795 ql4xxx_execute_diag_cmd(bsg_job);
796 rval = QLA_SUCCESS;
797 } else {
798 ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
799 __func__, diag_cmd);
802 return rval;
806 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
807 * @bsg_job: iscsi_bsg_job to handle
809 int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
811 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
812 struct iscsi_bsg_request *bsg_req = bsg_job->request;
813 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
814 struct scsi_qla_host *ha = to_qla_host(host);
816 switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
817 case QLISCSI_VND_READ_FLASH:
818 return qla4xxx_read_flash(bsg_job);
820 case QLISCSI_VND_UPDATE_FLASH:
821 return qla4xxx_update_flash(bsg_job);
823 case QLISCSI_VND_GET_ACB_STATE:
824 return qla4xxx_get_acb_state(bsg_job);
826 case QLISCSI_VND_READ_NVRAM:
827 return qla4xxx_read_nvram(bsg_job);
829 case QLISCSI_VND_UPDATE_NVRAM:
830 return qla4xxx_update_nvram(bsg_job);
832 case QLISCSI_VND_RESTORE_DEFAULTS:
833 return qla4xxx_restore_defaults(bsg_job);
835 case QLISCSI_VND_GET_ACB:
836 return qla4xxx_bsg_get_acb(bsg_job);
838 case QLISCSI_VND_DIAG_TEST:
839 return qla4xxx_execute_diag_test(bsg_job);
841 default:
842 ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
843 "0x%x\n", __func__, bsg_req->msgcode);
844 bsg_reply->result = (DID_ERROR << 16);
845 bsg_reply->reply_payload_rcv_len = 0;
846 bsg_job_done(bsg_job, bsg_reply->result,
847 bsg_reply->reply_payload_rcv_len);
848 return -ENOSYS;
853 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
854 * @bsg_job: iscsi_bsg_job to handle
856 int qla4xxx_bsg_request(struct bsg_job *bsg_job)
858 struct iscsi_bsg_request *bsg_req = bsg_job->request;
859 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
860 struct scsi_qla_host *ha = to_qla_host(host);
862 switch (bsg_req->msgcode) {
863 case ISCSI_BSG_HST_VENDOR:
864 return qla4xxx_process_vendor_specific(bsg_job);
866 default:
867 ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
868 __func__, bsg_req->msgcode);
871 return -ENOSYS;