/* drivers/scsi/qla2xxx/qla_bsg.c */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

/* BSG support for ELS/CT pass through */
void
qla2x00_bsg_job_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    struct bsg_job *bsg_job = sp->u.bsg_job;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;

    bsg_reply->result = res;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);
    sp->free(sp);
}
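
/*
 * Tear down a completed BSG srb: undo the scatter/gather DMA mappings
 * made at submit time (direction-dependent for FX00 pass-through
 * commands), free the dummy fcport allocated for host-based CT/ELS/FX00
 * requests, and return the srb to its pool.
 */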
void
qla2x00_bsg_sp_free(void *ptr)
{
    srb_t *sp = ptr;
    struct qla_hw_data *ha = sp->vha->hw;
    struct bsg_job *bsg_job = sp->u.bsg_job;
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

    if (sp->type == SRB_FXIOCB_BCMD) {
        piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
            &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
            dma_unmap_sg(&ha->pdev->dev,
                bsg_job->request_payload.sg_list,
                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
            dma_unmap_sg(&ha->pdev->dev,
                bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    } else {
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    }

    if (sp->type == SRB_CT_CMD ||
        sp->type == SRB_FXIOCB_BCMD ||
        sp->type == SRB_ELS_CMD_HST)
        kfree(sp->fcport);
    qla2x00_rel_sp(sp);
}
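
/*
 * Sanity-check FCP priority configuration data read from flash or
 * supplied by user space: an all-ones first word means no data, the
 * header must carry the "HQOS" signature, and (when flag is set) at
 * least one entry must be tagged valid.
 */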
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
    int i, ret, num_valid;
    uint8_t *bcode;
    struct qla_fcp_prio_entry *pri_entry;
    uint32_t *bcode_val_ptr, bcode_val;

    ret = 1;
    num_valid = 0;
    bcode = (uint8_t *)pri_cfg;
    bcode_val_ptr = (uint32_t *)pri_cfg;
    bcode_val = (uint32_t)(*bcode_val_ptr);

    if (bcode_val == 0xFFFFFFFF) {
        /* No FCP Priority config data in flash */
        ql_dbg(ql_dbg_user, vha, 0x7051,
            "No FCP Priority config data.\n");
        return 0;
    }

    if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
        bcode[3] != 'S') {
        /* Invalid FCP priority data header */
        ql_dbg(ql_dbg_user, vha, 0x7052,
            "Invalid FCP Priority data header. bcode=0x%x.\n",
            bcode_val);
        return 0;
    }
    if (flag != 1)
        return ret;

    pri_entry = &pri_cfg->entry[0];
    for (i = 0; i < pri_cfg->num_entries; i++) {
        if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
            num_valid++;
        pri_entry++;
    }

    if (num_valid == 0) {
        /* No valid FCP priority data entries */
        ql_dbg(ql_dbg_user, vha, 0x7053,
            "No valid FCP Priority data entries.\n");
        ret = 0;
    } else {
        /* FCP priority data is valid */
        ql_dbg(ql_dbg_user, vha, 0x7054,
            "Valid FCP priority data. num entries = %d.\n",
            num_valid);
    }

    return ret;
}
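
/*
 * Vendor command handler for FCP priority configuration: the sub
 * command selects enable/disable, get, or set of the priority table
 * cached in ha->fcp_prio_cfg (allocated on first set and validated
 * before it is pushed to the firmware).
 */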
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int ret = 0;
    uint32_t len;
    uint32_t oper;

    if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    /* Get the sub command */
    oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

    /* Only set config is allowed if config memory is not allocated */
    if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    switch (oper) {
    case QLFC_FCP_PRIO_DISABLE:
        if (ha->flags.fcp_prio_enabled) {
            ha->flags.fcp_prio_enabled = 0;
            ha->fcp_prio_cfg->attributes &=
                ~FCP_PRIO_ATTR_ENABLE;
            qla24xx_update_all_fcp_prio(vha);
            bsg_reply->result = DID_OK;
        } else {
            ret = -EINVAL;
            bsg_reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }
        break;

    case QLFC_FCP_PRIO_ENABLE:
        if (!ha->flags.fcp_prio_enabled) {
            if (ha->fcp_prio_cfg) {
                ha->flags.fcp_prio_enabled = 1;
                ha->fcp_prio_cfg->attributes |=
                    FCP_PRIO_ATTR_ENABLE;
                qla24xx_update_all_fcp_prio(vha);
                bsg_reply->result = DID_OK;
            } else {
                ret = -EINVAL;
                bsg_reply->result = (DID_ERROR << 16);
                goto exit_fcp_prio_cfg;
            }
        }
        break;

    case QLFC_FCP_PRIO_GET_CONFIG:
        len = bsg_job->reply_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            ret = -EINVAL;
            bsg_reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }

        bsg_reply->result = DID_OK;
        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(
                bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
                len);

        break;

    case QLFC_FCP_PRIO_SET_CONFIG:
        len = bsg_job->request_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            bsg_reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            goto exit_fcp_prio_cfg;
        }

        if (!ha->fcp_prio_cfg) {
            ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
            if (!ha->fcp_prio_cfg) {
                ql_log(ql_log_warn, vha, 0x7050,
                    "Unable to allocate memory for fcp prio "
                    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
                bsg_reply->result = (DID_ERROR << 16);
                ret = -ENOMEM;
                goto exit_fcp_prio_cfg;
            }
        }

        memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
            FCP_PRIO_CFG_SIZE);

        /* validate fcp priority data */

        if (!qla24xx_fcp_prio_cfg_valid(vha,
            (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
            bsg_reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            /* If the buffer was invalid then fcp_prio_cfg
             * is of no use.
             */
            vfree(ha->fcp_prio_cfg);
            ha->fcp_prio_cfg = NULL;
            goto exit_fcp_prio_cfg;
        }

        ha->flags.fcp_prio_enabled = 0;
        if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
            ha->flags.fcp_prio_enabled = 1;
        qla24xx_update_all_fcp_prio(vha);
        bsg_reply->result = DID_OK;
        break;
    default:
        ret = -EINVAL;
        break;
    }

exit_fcp_prio_cfg:
    if (!ret)
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    return ret;
}
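
/*
 * Handle an ELS pass-through BSG request. Port-directed ELS
 * (FC_BSG_RPT_ELS) reuses the rport's fcport after making sure it is
 * logged in; host-based ELS (FC_BSG_HST_ELS_NOLOGIN) builds a
 * throw-away fcport from the destination port id. The payload is
 * limited to a single SG element in each direction.
 */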
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_rport *rport;
    fc_port_t *fcport = NULL;
    struct Scsi_Host *host;
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    srb_t *sp;
    const char *type;
    int req_sg_cnt, rsp_sg_cnt;
    int rval = (DRIVER_ERROR << 16);
    uint16_t nextlid = 0;

    if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
        rport = fc_bsg_to_rport(bsg_job);
        fcport = *(fc_port_t **) rport->dd_data;
        host = rport_to_shost(rport);
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_RPT_ELS";
    } else {
        host = fc_bsg_to_shost(bsg_job);
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_HST_ELS_NOLOGIN";
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
        rval = -EIO;
        goto done;
    }

    /* pass through is supported only for ISP 4Gb or higher */
    if (!IS_FWI2_CAPABLE(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7001,
            "ELS passthru not supported for ISP23xx based adapters.\n");
        rval = -EPERM;
        goto done;
    }

    /* Multiple SG's are not supported for ELS requests */
    if (bsg_job->request_payload.sg_cnt > 1 ||
        bsg_job->reply_payload.sg_cnt > 1) {
        ql_dbg(ql_dbg_user, vha, 0x7002,
            "Multiple SG's are not supported for ELS requests, "
            "request_sg_cnt=%x reply_sg_cnt=%x.\n",
            bsg_job->request_payload.sg_cnt,
            bsg_job->reply_payload.sg_cnt);
        rval = -EPERM;
        goto done;
    }

    /* ELS request for rport */
    if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
        /* make sure the rport is logged in,
         * if not perform fabric login
         */
        if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
            ql_dbg(ql_dbg_user, vha, 0x7003,
                "Failed to login port %06X for ELS passthru.\n",
                fcport->d_id.b24);
            rval = -EIO;
            goto done;
        }
    } else {
        /* Allocate a dummy fcport structure, since functions
         * preparing the IOCB and mailbox command retrieves port
         * specific information from fcport structure. For Host based
         * ELS commands there will be no fcport structure allocated
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
            rval = -ENOMEM;
            goto done;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->d_id.b.al_pa =
            bsg_request->rqst_data.h_els.port_id[0];
        fcport->d_id.b.area =
            bsg_request->rqst_data.h_els.port_id[1];
        fcport->d_id.b.domain =
            bsg_request->rqst_data.h_els.port_id[2];
        fcport->loop_id =
            (fcport->d_id.b.al_pa == 0xFD) ?
            NPH_FABRIC_CONTROLLER : NPH_F_PORT;
    }

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7008,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
            "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    sp->type =
        (bsg_request->msgcode == FC_BSG_RPT_ELS ?
         SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
    sp->name =
        (bsg_request->msgcode == FC_BSG_RPT_ELS ?
         "bsg_els_rpt" : "bsg_els_hst");
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->done = qla2x00_bsg_job_done;

    ql_dbg(ql_dbg_user, vha, 0x700a,
        "bsg rqst type: %s els type: %x - loop-id=%x "
        "portid=%-2x%02x%02x.\n", type,
        bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x700e,
            "qla2x00_start_sp failed = %d\n", rval);
        qla2x00_rel_sp(sp);
        rval = -EIO;
        goto done_unmap_sg;
    }
    return rval;
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

done_free_fcport:
    /* Only the dummy fcport allocated for host-based ELS is ours to free;
     * a port-directed (FC_BSG_RPT_ELS) fcport belongs to the rport.
     */
    if (bsg_request->msgcode != FC_BSG_RPT_ELS)
        kfree(fcport);
done:
    return rval;
}
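
/*
 * A CT pass-through command IOCB carries two data segment descriptors
 * (DSDs); each continuation IOCB carries five more. For example,
 * dsds = 9 needs 1 + ceil((9 - 2) / 5) = 3 IOCBs.
 */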
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 2) {
        iocbs += (dsds - 2) / 5;
        if ((dsds - 2) % 5)
            iocbs++;
    }
    return iocbs;
}
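
/*
 * Handle a CT pass-through BSG request: map the payloads for DMA,
 * derive the target loop id from the CT preamble (0xFC = SNS,
 * 0xFA = management server), attach a dummy fcport, and fire the
 * command through an srb.
 */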
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
    srb_t *sp;
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = (DRIVER_ERROR << 16);
    int req_sg_cnt, rsp_sg_cnt;
    uint16_t loop_id;
    struct fc_port *fcport;
    char *type = "FC_BSG_HST_CT";

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x700f,
            "dma_map_sg return %d for request\n", req_sg_cnt);
        rval = -ENOMEM;
        goto done;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7010,
            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
        rval = -ENOMEM;
        goto done;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7011,
            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7012,
            "Host is not online.\n");
        rval = -EIO;
        goto done_unmap_sg;
    }

    loop_id =
        (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
            >> 24;
    switch (loop_id) {
    case 0xFC:
        loop_id = cpu_to_le16(NPH_SNS);
        break;
    case 0xFA:
        loop_id = vha->mgmt_svr_loop_id;
        break;
    default:
        ql_dbg(ql_dbg_user, vha, 0x7013,
            "Unknown loop id: %x.\n", loop_id);
        rval = -EINVAL;
        goto done_unmap_sg;
    }

    /* Allocate a dummy fcport structure, since functions preparing the
     * IOCB and mailbox command retrieves port specific information
     * from fcport structure. For Host based ELS commands there will be
     * no fcport structure allocated
     */
    fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (!fcport) {
        ql_log(ql_log_warn, vha, 0x7014,
            "Failed to allocate fcport.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    /* Initialize all required fields of fcport */
    fcport->vha = vha;
    fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
    fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
    fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
    fcport->loop_id = loop_id;

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        ql_log(ql_log_warn, vha, 0x7015,
            "qla2x00_get_sp failed.\n");
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    sp->type = SRB_CT_CMD;
    sp->name = "bsg_ct";
    sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->done = qla2x00_bsg_job_done;

    ql_dbg(ql_dbg_user, vha, 0x7016,
        "bsg rqst type: %s ct type: %x - "
        "loop-id=%x portid=%02x%02x%02x.\n", type,
        (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
        fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
        fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7017,
            "qla2x00_start_sp failed=%d.\n", rval);
        qla2x00_rel_sp(sp);
        rval = -EIO;
        goto done_free_fcport;
    }
    return rval;

done_free_fcport:
    kfree(fcport);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
    return rval;
}
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
    int ret = 0;
    int rval = 0;
    uint16_t new_config[4];
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
        goto done_reset_internal;

    memset(new_config, 0, sizeof(new_config));
    if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
        ENABLE_INTERNAL_LOOPBACK ||
        (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
        ENABLE_EXTERNAL_LOOPBACK) {
        new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
        ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
            (new_config[0] & INTERNAL_LOOPBACK_MASK));
        memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

        ha->notify_dcbx_comp = wait;
        ha->notify_lb_portup_comp = wait2;

        ret = qla81xx_set_port_config(vha, new_config);
        if (ret != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x7025,
                "Set port config failed.\n");
            ha->notify_dcbx_comp = 0;
            ha->notify_lb_portup_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        }

        /* Wait for DCBX complete event */
        if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
            (DCBX_COMP_TIMEOUT * HZ))) {
            ql_dbg(ql_dbg_user, vha, 0x7026,
                "DCBX completion not received.\n");
            ha->notify_dcbx_comp = 0;
            ha->notify_lb_portup_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        } else
            ql_dbg(ql_dbg_user, vha, 0x7027,
                "DCBX completion received.\n");

        if (wait2 &&
            !wait_for_completion_timeout(&ha->lb_portup_comp,
            (LB_PORTUP_COMP_TIMEOUT * HZ))) {
            ql_dbg(ql_dbg_user, vha, 0x70c5,
                "Port up completion not received.\n");
            ha->notify_lb_portup_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        } else
            ql_dbg(ql_dbg_user, vha, 0x70c6,
                "Port up completion received.\n");

        ha->notify_dcbx_comp = 0;
        ha->notify_lb_portup_comp = 0;
    }

done_reset_internal:
    return rval;
}
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
    int ret = 0;
    int rval = 0;
    unsigned long rem_tmo = 0, current_tmo = 0;
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
        goto done_set_internal;

    if (mode == INTERNAL_LOOPBACK)
        new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
    else if (mode == EXTERNAL_LOOPBACK)
        new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
    ql_dbg(ql_dbg_user, vha, 0x70be,
        "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

    memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

    ha->notify_dcbx_comp = 1;
    ret = qla81xx_set_port_config(vha, new_config);
    if (ret != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7021,
            "set port config failed.\n");
        ha->notify_dcbx_comp = 0;
        rval = -EINVAL;
        goto done_set_internal;
    }

    /* Wait for DCBX complete event */
    current_tmo = DCBX_COMP_TIMEOUT * HZ;
    while (1) {
        rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
            current_tmo);
        if (!ha->idc_extend_tmo || rem_tmo) {
            ha->idc_extend_tmo = 0;
            break;
        }
        current_tmo = ha->idc_extend_tmo * HZ;
        ha->idc_extend_tmo = 0;
    }

    if (!rem_tmo) {
        ql_dbg(ql_dbg_user, vha, 0x7022,
            "DCBX completion not received.\n");
        ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
        /*
         * If the reset of the loopback mode doesn't work take a FCoE
         * dump and reset the chip.
         */
        if (ret) {
            ha->isp_ops->fw_dump(vha, 0);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        }
        rval = -EINVAL;
    } else {
        if (ha->flags.idc_compl_status) {
            ql_dbg(ql_dbg_user, vha, 0x70c3,
                "Bad status in IDC Completion AEN\n");
            rval = -EINVAL;
            ha->flags.idc_compl_status = 0;
        } else
            ql_dbg(ql_dbg_user, vha, 0x7023,
                "DCBX completion received.\n");
    }

    ha->notify_dcbx_comp = 0;
    ha->idc_extend_tmo = 0;

done_set_internal:
    return rval;
}
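
/*
 * Vendor loopback/echo diagnostic. On fabric-attached ports an ECHO
 * frame is used; otherwise the port is switched into internal or
 * external loopback (CNA parts require a port-config round trip with
 * DCBX/port-up handshakes) before running the loopback test, and the
 * original port configuration is restored afterwards.
 */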
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval;
    uint8_t command_sent;
    char *type;
    struct msg_echo_lb elreq;
    uint16_t response[MAILBOX_REGISTER_COUNT];
    uint16_t config[4], new_config[4];
    uint8_t *fw_sts_ptr;
    uint8_t *req_data = NULL;
    dma_addr_t req_data_dma;
    uint32_t req_data_len;
    uint8_t *rsp_data = NULL;
    dma_addr_t rsp_data_dma;
    uint32_t rsp_data_len;

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
        return -EIO;
    }

    memset(&elreq, 0, sizeof(elreq));

    elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
        DMA_TO_DEVICE);

    if (!elreq.req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701a,
            "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
        return -ENOMEM;
    }

    elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
        DMA_FROM_DEVICE);

    if (!elreq.rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701b,
            "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
        rval = -ENOMEM;
        goto done_unmap_req_sg;
    }

    if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x701c,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x "
            "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
            bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
    req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
        &req_data_dma, GFP_KERNEL);
    if (!req_data) {
        ql_log(ql_log_warn, vha, 0x701d,
            "dma alloc failed for req_data.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
        &rsp_data_dma, GFP_KERNEL);
    if (!rsp_data) {
        ql_log(ql_log_warn, vha, 0x7004,
            "dma alloc failed for rsp_data.\n");
        rval = -ENOMEM;
        goto done_free_dma_req;
    }

    /* Copy the request buffer in req_data now */
    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, req_data, req_data_len);

    elreq.send_dma = req_data_dma;
    elreq.rcv_dma = rsp_data_dma;
    elreq.transfer_size = req_data_len;

    elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
    elreq.iteration_count =
        bsg_request->rqst_data.h_vendor.vendor_cmd[2];

    if (atomic_read(&vha->loop_state) == LOOP_READY &&
        (ha->current_topology == ISP_CFG_F ||
        (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
         req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
        elreq.options == EXTERNAL_LOOPBACK) {
        type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
        ql_dbg(ql_dbg_user, vha, 0x701e,
            "BSG request type: %s.\n", type);
        command_sent = INT_DEF_LB_ECHO_CMD;
        rval = qla2x00_echo_test(vha, &elreq, response);
    } else {
        if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
            memset(config, 0, sizeof(config));
            memset(new_config, 0, sizeof(new_config));

            if (qla81xx_get_port_config(vha, config)) {
                ql_log(ql_log_warn, vha, 0x701f,
                    "Get port config failed.\n");
                rval = -EPERM;
                goto done_free_dma_rsp;
            }

            if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
                ql_dbg(ql_dbg_user, vha, 0x70c4,
                    "Loopback operation already in "
                    "progress.\n");
                rval = -EAGAIN;
                goto done_free_dma_rsp;
            }

            ql_dbg(ql_dbg_user, vha, 0x70c0,
                "elreq.options=%04x\n", elreq.options);

            if (elreq.options == EXTERNAL_LOOPBACK) {
                if (IS_QLA8031(ha) || IS_QLA8044(ha))
                    rval = qla81xx_set_loopback_mode(vha,
                        config, new_config, elreq.options);
                else
                    rval = qla81xx_reset_loopback_mode(vha,
                        config, 1, 0);
            } else {
                rval = qla81xx_set_loopback_mode(vha, config,
                    new_config, elreq.options);
            }

            if (rval) {
                rval = -EPERM;
                goto done_free_dma_rsp;
            }

            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x7028,
                "BSG request type: %s.\n", type);

            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);

            if (response[0] == MBS_COMMAND_ERROR &&
                response[1] == MBS_LB_RESET) {
                ql_log(ql_log_warn, vha, 0x7029,
                    "MBX command error, Aborting ISP.\n");
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                qla2xxx_wake_dpc(vha);
                qla2x00_wait_for_chip_reset(vha);
                /* Also reset the MPI */
                if (IS_QLA81XX(ha)) {
                    if (qla81xx_restart_mpi_firmware(vha) !=
                        QLA_SUCCESS) {
                        ql_log(ql_log_warn, vha, 0x702a,
                            "MPI reset failed.\n");
                    }
                }

                rval = -EIO;
                goto done_free_dma_rsp;
            }

            if (new_config[0]) {
                int ret;

                /* Revert back to original port config
                 * Also clear internal loopback
                 */
                ret = qla81xx_reset_loopback_mode(vha,
                    new_config, 0, 1);
                if (ret) {
                    /*
                     * If the reset of the loopback mode
                     * doesn't work take FCoE dump and then
                     * reset the chip.
                     */
                    ha->isp_ops->fw_dump(vha, 0);
                    set_bit(ISP_ABORT_NEEDED,
                        &vha->dpc_flags);
                }
            }

        } else {
            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x702b,
                "BSG request type: %s.\n", type);
            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);
        }
    }

    if (rval) {
        ql_log(ql_log_warn, vha, 0x702c,
            "Vendor request %s failed.\n", type);

        rval = 0;
        bsg_reply->result = (DID_ERROR << 16);
        bsg_reply->reply_payload_rcv_len = 0;
    } else {
        ql_dbg(ql_dbg_user, vha, 0x702d,
            "Vendor request %s completed.\n", type);
        bsg_reply->result = (DID_OK << 16);
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, rsp_data,
            rsp_data_len);
    }

    bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
        sizeof(response) + sizeof(uint8_t);
    fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
    memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
        sizeof(response));
    fw_sts_ptr += sizeof(response);
    *fw_sts_ptr = command_sent;

done_free_dma_rsp:
    dma_free_coherent(&ha->pdev->dev, rsp_data_len,
        rsp_data, rsp_data_dma);
done_free_dma_req:
    dma_free_coherent(&ha->pdev->dev, req_data_len,
        req_data, req_data_dma);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!rval)
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    return rval;
}
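
/*
 * Vendor command: reset the ISP84xx chip; the vendor sub-command flag
 * selects a reset into diagnostic firmware.
 */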
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint32_t flag;

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

    rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7030,
            "Vendor request 84xx reset failed.\n");
        rval = (DID_ERROR << 16);

    } else {
        ql_dbg(ql_dbg_user, vha, 0x7031,
            "Vendor request 84xx reset completed.\n");
        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    }

    return rval;
}
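
/*
 * Vendor command: download a firmware image to the ISP84xx via a
 * VERIFY_CHIP IOCB. The image is staged in a coherent DMA buffer and
 * the firmware version word is read from byte offset 8 of the image.
 */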
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct verify_chip_entry_84xx *mn = NULL;
    dma_addr_t mn_dma, fw_dma;
    void *fw_buf = NULL;
    int rval = 0;
    uint32_t sg_cnt;
    uint32_t data_len;
    uint16_t options;
    uint32_t flag;
    uint32_t fw_ver;

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7032,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7033,
            "dma_map_sg returned %d for request.\n", sg_cnt);
        return -ENOMEM;
    }

    if (sg_cnt != bsg_job->request_payload.sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7034,
            "DMA mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    data_len = bsg_job->request_payload.payload_len;
    fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
        &fw_dma, GFP_KERNEL);
    if (!fw_buf) {
        ql_log(ql_log_warn, vha, 0x7035,
            "DMA alloc failed for fw_buf.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, fw_buf, data_len);

    mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x7036,
            "DMA alloc failed for fw buffer.\n");
        rval = -ENOMEM;
        goto done_free_fw_buf;
    }

    flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
    fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

    mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
    mn->entry_count = 1;

    options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
    if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
        options |= VCO_DIAG_FW;

    mn->options = cpu_to_le16(options);
    mn->fw_ver = cpu_to_le32(fw_ver);
    mn->fw_size = cpu_to_le32(data_len);
    mn->fw_seq_size = cpu_to_le32(data_len);
    mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
    mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
    mn->dseg_length = cpu_to_le32(data_len);
    mn->data_seg_cnt = cpu_to_le16(1);

    rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7037,
            "Vendor request 84xx updatefw failed.\n");

        rval = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x7038,
            "Vendor request 84xx updatefw completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK;
    }

    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
    dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

    if (!rval)
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    return rval;
}
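
/*
 * Vendor command: ISP84xx management access (read/write memory, get
 * info, change config) through an ACCESS_CHIP IOCB. Data direction,
 * DMA mapping, and the staging buffer depend on the sub command.
 */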
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct access_chip_84xx *mn = NULL;
    dma_addr_t mn_dma, mgmt_dma;
    void *mgmt_b = NULL;
    int rval = 0;
    struct qla_bsg_a84_mgmt *ql84_mgmt;
    uint32_t sg_cnt;
    uint32_t data_len = 0;
    uint32_t dma_direction = DMA_NONE;

    if (!IS_QLA84XX(ha)) {
        ql_log(ql_log_warn, vha, 0x703a,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x703c,
            "DMA alloc failed for fw buffer.\n");
        return -ENOMEM;
    }

    mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
    mn->entry_count = 1;
    ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
    switch (ql84_mgmt->mgmt.cmd) {
    case QLA84_MGMT_READ_MEM:
    case QLA84_MGMT_GET_INFO:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703d,
                "dma_map_sg returned %d for reply.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_FROM_DEVICE;

        if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703e,
                "DMA mapping resulted in different sg counts, "
                "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
                bsg_job->reply_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->reply_payload.payload_len;

        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x703f,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
            mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
            mn->parameter1 =
                cpu_to_le32(
                    ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

        } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
            mn->options = cpu_to_le16(ACO_REQUEST_INFO);
            mn->parameter1 =
                cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

            mn->parameter2 =
                cpu_to_le32(
                    ql84_mgmt->mgmt.mgmtp.u.info.context);
        }
        break;

    case QLA84_MGMT_WRITE_MEM:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7040,
                "dma_map_sg returned %d.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_TO_DEVICE;

        if (sg_cnt != bsg_job->request_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7041,
                "DMA mapping resulted in different sg counts, "
                "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
                bsg_job->request_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->request_payload.payload_len;
        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x7042,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

        mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
        break;

    case QLA84_MGMT_CHNG_CONFIG:
        mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

        mn->parameter2 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

        mn->parameter3 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
        break;

    default:
        rval = -EIO;
        goto exit_mgmt;
    }

    if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
        mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
        mn->dseg_count = cpu_to_le16(1);
        mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
        mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
        mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
    }

    rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7043,
            "Vendor request 84xx mgmt failed.\n");

        rval = (DID_ERROR << 16);

    } else {
        ql_dbg(ql_dbg_user, vha, 0x7044,
            "Vendor request 84xx mgmt completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK;

        if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
            (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
            bsg_reply->reply_payload_rcv_len =
                bsg_job->reply_payload.payload_len;

            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, mgmt_b,
                data_len);
        }
    }

done_unmap_sg:
    if (mgmt_b)
        dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

    if (dma_direction == DMA_TO_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    else if (dma_direction == DMA_FROM_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

    if (!rval)
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    return rval;
}
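
/*
 * Vendor command: get or set the iiDMA speed the firmware uses for a
 * logged-in target port, looked up by WWPN.
 */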
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    int rval = 0;
    struct qla_port_param *port_param = NULL;
    fc_port_t *fcport = NULL;
    int found = 0;
    uint16_t mb[MAILBOX_REGISTER_COUNT];
    uint8_t *rsp_ptr = NULL;

    if (!IS_IIDMA_CAPABLE(vha->hw)) {
        ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
        return -EINVAL;
    }

    port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
    if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
        ql_log(ql_log_warn, vha, 0x7048,
            "Invalid destination type.\n");
        return -EINVAL;
    }

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->port_type != FCT_TARGET)
            continue;

        if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
            fcport->port_name, sizeof(fcport->port_name)))
            continue;

        found = 1;
        break;
    }

    if (!found) {
        ql_log(ql_log_warn, vha, 0x7049,
            "Failed to find port.\n");
        return -EINVAL;
    }

    if (atomic_read(&fcport->state) != FCS_ONLINE) {
        ql_log(ql_log_warn, vha, 0x704a,
            "Port is not online.\n");
        return -EINVAL;
    }

    if (fcport->flags & FCF_LOGIN_NEEDED) {
        ql_log(ql_log_warn, vha, 0x704b,
            "Remote port not logged in flags = 0x%x.\n", fcport->flags);
        return -EINVAL;
    }

    if (port_param->mode)
        rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
            port_param->speed, mb);
    else
        rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
            &port_param->speed, mb);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x704c,
            "iIDMA cmd failed for %8phN -- "
            "%04x %x %04x %04x.\n", fcport->port_name,
            rval, fcport->fp_speed, mb[0], mb[1]);
        rval = (DID_ERROR << 16);
    } else {
        if (!port_param->mode) {
            bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
                sizeof(struct qla_port_param);

            rsp_ptr = ((uint8_t *)bsg_reply) +
                sizeof(struct fc_bsg_reply);

            memcpy(rsp_ptr, port_param,
                sizeof(struct qla_port_param));
        }

        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    }

    return rval;
}
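
/*
 * Validate an option ROM read/update request and stage a buffer for it:
 * check the requested start offset against the flash layout, clamp the
 * region size to the option ROM size, and move optrom_state to
 * QLA_SREADING/QLA_SWRITING. Callers hold ha->optrom_mutex.
 */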
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    uint32_t start = 0;
    int valid = 0;
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev)))
        return -EINVAL;

    start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
    if (start > ha->optrom_size) {
        ql_log(ql_log_warn, vha, 0x7055,
            "start %d > optrom_size %d.\n", start, ha->optrom_size);
        return -EINVAL;
    }

    if (ha->optrom_state != QLA_SWAITING) {
        ql_log(ql_log_info, vha, 0x7056,
            "optrom_state %d.\n", ha->optrom_state);
        return -EBUSY;
    }

    ha->optrom_region_start = start;
    ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
    if (is_update) {
        if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
            valid = 1;
        else if (start == (ha->flt_region_boot * 4) ||
            start == (ha->flt_region_fw * 4))
            valid = 1;
        else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
            IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
            valid = 1;
        if (!valid) {
            ql_log(ql_log_warn, vha, 0x7058,
                "Invalid start region 0x%x/0x%x.\n", start,
                bsg_job->request_payload.payload_len);
            return -EINVAL;
        }

        ha->optrom_region_size = start +
            bsg_job->request_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->request_payload.payload_len;
        ha->optrom_state = QLA_SWRITING;
    } else {
        ha->optrom_region_size = start +
            bsg_job->reply_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->reply_payload.payload_len;
        ha->optrom_state = QLA_SREADING;
    }

    ha->optrom_buffer = vzalloc(ha->optrom_region_size);
    if (!ha->optrom_buffer) {
        ql_log(ql_log_warn, vha, 0x7059,
            "Read: Unable to allocate memory for optrom retrieval "
            "(%x)\n", ha->optrom_region_size);

        ha->optrom_state = QLA_SWAITING;
        return -ENOMEM;
    }

    return 0;
}
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    if (ha->flags.nic_core_reset_hdlr_active)
        return -EBUSY;

    mutex_lock(&ha->optrom_mutex);
    rval = qla2x00_optrom_setup(bsg_job, vha, 0);
    if (rval) {
        mutex_unlock(&ha->optrom_mutex);
        return rval;
    }

    ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
    bsg_reply->result = DID_OK;
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    mutex_unlock(&ha->optrom_mutex);
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);
    return rval;
}
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    mutex_lock(&ha->optrom_mutex);
    rval = qla2x00_optrom_setup(bsg_job, vha, 1);
    if (rval) {
        mutex_unlock(&ha->optrom_mutex);
        return rval;
    }

    /* Set the isp82xx_no_md_cap not to capture minidump */
    ha->flags.isp82xx_no_md_cap = 1;

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    bsg_reply->result = DID_OK;
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    mutex_unlock(&ha->optrom_mutex);
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);
    return rval;
}
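
/*
 * The following helpers tunnel small register/EEPROM accesses (FRU
 * image versions, FRU status registers, raw I2C) through the SFP
 * read/write mailbox interface using a one-shot buffer from the
 * s_dma_pool; errors are reported via the vendor-specific reply status.
 */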
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_image_version_list *list = (void *)bsg;
    struct qla_image_version *image;
    uint32_t count;
    dma_addr_t sfp_dma;
    void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

    image = list->version;
    count = list->count;
    while (count--) {
        memcpy(sfp, &image->field_info, sizeof(image->field_info));
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            image->field_address.device, image->field_address.offset,
            sizeof(image->field_info), image->field_address.option);
        if (rval) {
            bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                EXT_STATUS_MAILBOX;
            goto dealloc;
        }
        image++;
    }

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_status_reg *sr = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

    rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
        sr->field_address.device, sr->field_address.offset,
        sizeof(sr->status_reg), sr->field_address.option);
    sr->status_reg = *sfp;

    if (rval) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->reply_payload_rcv_len = sizeof(*sr);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_status_reg *sr = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

    *sfp = sr->status_reg;
    rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
        sr->field_address.device, sr->field_address.offset,
        sizeof(sr->status_reg), sr->field_address.option);

    if (rval) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_i2c_access *i2c = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

    memcpy(sfp, i2c->buffer, i2c->length);
    rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
        i2c->device, i2c->offset, i2c->length, i2c->option);

    if (rval) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_i2c_access *i2c = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

    rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
        i2c->device, i2c->offset, i2c->length, i2c->option);

    if (rval) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    memcpy(i2c->buffer, sfp, i2c->length);
    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
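
/*
 * Bidirectional pass-through diagnostic: requires a bidi-capable ISP in
 * point-to-point mode behind a switch, performs a one-time self-login
 * to obtain a loop id, and completes in the interrupt handler. Errors
 * are reported through the vendor-specific reply status, not the bsg
 * result.
 */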
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    uint32_t rval = EXT_STATUS_OK;
    uint16_t req_sg_cnt = 0;
    uint16_t rsp_sg_cnt = 0;
    uint16_t nextlid = 0;
    uint32_t tot_dsds;
    srb_t *sp = NULL;
    uint32_t req_data_len = 0;
    uint32_t rsp_data_len = 0;

    /* Check the type of the adapter */
    if (!IS_BIDI_CAPABLE(ha)) {
        ql_log(ql_log_warn, vha, 0x70a0,
            "This adapter is not supported\n");
        rval = EXT_STATUS_NOT_SUPPORTED;
        goto done;
    }

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
        rval = EXT_STATUS_BUSY;
        goto done;
    }

    /* Check if host is online */
    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x70a1,
            "Host is not online\n");
        rval = EXT_STATUS_DEVICE_OFFLINE;
        goto done;
    }

    /* Check if cable is plugged in or not */
    if (vha->device_flags & DFLG_NO_CABLE) {
        ql_log(ql_log_warn, vha, 0x70a2,
            "Cable is unplugged...\n");
        rval = EXT_STATUS_INVALID_CFG;
        goto done;
    }

    /* Check if the switch is connected or not */
    if (ha->current_topology != ISP_CFG_F) {
        ql_log(ql_log_warn, vha, 0x70a3,
            "Host is not connected to the switch\n");
        rval = EXT_STATUS_INVALID_CFG;
        goto done;
    }

    /* Check if operating mode is P2P */
    if (ha->operating_mode != P2P) {
        ql_log(ql_log_warn, vha, 0x70a4,
            "Host operating mode is not P2P\n");
        rval = EXT_STATUS_INVALID_CFG;
        goto done;
    }

    mutex_lock(&ha->selflogin_lock);
    if (vha->self_login_loop_id == 0) {
        /* Initialize all required fields of fcport */
        vha->bidir_fcport.vha = vha;
        vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
        vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
        vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
        vha->bidir_fcport.loop_id = vha->loop_id;

        if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
            ql_log(ql_log_warn, vha, 0x70a7,
                "Failed to login port %06X for bidirectional IOCB\n",
                vha->bidir_fcport.d_id.b24);
            mutex_unlock(&ha->selflogin_lock);
            rval = EXT_STATUS_MAILBOX;
            goto done;
        }
        vha->self_login_loop_id = nextlid - 1;

    }
    /* Assign the self login loop id to fcport */
    mutex_unlock(&ha->selflogin_lock);

    vha->bidir_fcport.loop_id = vha->self_login_loop_id;

    req_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt,
        DMA_TO_DEVICE);

    if (!req_sg_cnt) {
        rval = EXT_STATUS_NO_MEMORY;
        goto done;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
        DMA_FROM_DEVICE);

    if (!rsp_sg_cnt) {
        rval = EXT_STATUS_NO_MEMORY;
        goto done_unmap_req_sg;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_dbg(ql_dbg_user, vha, 0x70a9,
            "Dma mapping resulted in different sg counts "
            "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
            "%x dma_reply_sg_cnt: %x]\n",
            bsg_job->request_payload.sg_cnt, req_sg_cnt,
            bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = EXT_STATUS_NO_MEMORY;
        goto done_unmap_sg;
    }
    /* Fetch the payload lengths before comparing them, so the check
     * actually rejects mismatched request/reply sizes.
     */
    req_data_len = bsg_job->request_payload.payload_len;
    rsp_data_len = bsg_job->reply_payload.payload_len;

    if (req_data_len != rsp_data_len) {
        rval = EXT_STATUS_BUSY;
        ql_log(ql_log_warn, vha, 0x70aa,
            "req_data_len != rsp_data_len\n");
        goto done_unmap_sg;
    }
    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
    if (!sp) {
        ql_dbg(ql_dbg_user, vha, 0x70ac,
            "Alloc SRB structure failed\n");
        rval = EXT_STATUS_NO_MEMORY;
        goto done_unmap_sg;
    }

    /* Populate srb->ctx with bidir ctx */
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->type = SRB_BIDI_CMD;
    sp->done = qla2x00_bsg_job_done;

    /* Add the read and write sg count */
    tot_dsds = rsp_sg_cnt + req_sg_cnt;

    rval = qla2x00_start_bidir(sp, vha, tot_dsds);
    if (rval != EXT_STATUS_OK)
        goto done_free_srb;
    /* the bsg request will be completed in the interrupt handler */
    return rval;

done_free_srb:
    mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

    /* Return an error vendor specific response
     * and complete the bsg request
     */
    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->reply_payload_rcv_len = 0;
    bsg_reply->result = (DID_OK) << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);
    /* Always return success, vendor rsp carries correct status */
    return 0;
}
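
/*
 * ISPFx00 management pass-through: the vendor command carries an FX00
 * IOCB request whose flags indicate which payload directions need DMA
 * mapping; the loop id is taken from the request's dataword.
 */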
1944 static int
1945 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1947 struct fc_bsg_request *bsg_request = bsg_job->request;
1948 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1949 scsi_qla_host_t *vha = shost_priv(host);
1950 struct qla_hw_data *ha = vha->hw;
1951 int rval = (DRIVER_ERROR << 16);
1952 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1953 srb_t *sp;
1954 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1955 struct fc_port *fcport;
1956 char *type = "FC_BSG_HST_FX_MGMT";
1958 /* Copy the IOCB specific information */
1959 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1960 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1962 /* Dump the vendor information */
1963 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
1964 (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
1966 if (!vha->flags.online) {
1967 ql_log(ql_log_warn, vha, 0x70d0,
1968 "Host is not online.\n");
1969 rval = -EIO;
1970 goto done;
1973 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1974 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1975 bsg_job->request_payload.sg_list,
1976 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1977 if (!req_sg_cnt) {
1978 ql_log(ql_log_warn, vha, 0x70c7,
1979 "dma_map_sg return %d for request\n", req_sg_cnt);
1980 rval = -ENOMEM;
1981 goto done;
1982 }
1983 }
1985 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
1986 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1987 bsg_job->reply_payload.sg_list,
1988 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1989 if (!rsp_sg_cnt) {
1990 ql_log(ql_log_warn, vha, 0x70c8,
1991 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
1992 rval = -ENOMEM;
1993 goto done_unmap_req_sg;
1994 }
1995 }
1997 ql_dbg(ql_dbg_user, vha, 0x70c9,
1998 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
1999 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2000 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2002 /* Allocate a dummy fcport structure, since the functions preparing the
2003 * IOCB and mailbox command retrieve port specific information
2004 * from the fcport structure. For host based ELS commands there will be
2005 * no fcport structure allocated.
2006 */
2007 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2008 if (!fcport) {
2009 ql_log(ql_log_warn, vha, 0x70ca,
2010 "Failed to allocate fcport.\n");
2011 rval = -ENOMEM;
2012 goto done_unmap_rsp_sg;
2013 }
2015 /* Alloc SRB structure */
2016 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2017 if (!sp) {
2018 ql_log(ql_log_warn, vha, 0x70cb,
2019 "qla2x00_get_sp failed.\n");
2020 rval = -ENOMEM;
2021 goto done_free_fcport;
2022 }
2024 /* Initialize all required fields of fcport */
2025 fcport->vha = vha;
2026 fcport->loop_id = piocb_rqst->dataword;
2028 sp->type = SRB_FXIOCB_BCMD;
2029 sp->name = "bsg_fx_mgmt";
2030 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2031 sp->u.bsg_job = bsg_job;
2032 sp->free = qla2x00_bsg_sp_free;
2033 sp->done = qla2x00_bsg_job_done;
2035 ql_dbg(ql_dbg_user, vha, 0x70cc,
2036 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2037 type, piocb_rqst->func_type, fcport->loop_id);
2039 rval = qla2x00_start_sp(sp);
2040 if (rval != QLA_SUCCESS) {
2041 ql_log(ql_log_warn, vha, 0x70cd,
2042 "qla2x00_start_sp failed=%d.\n", rval);
2043 mempool_free(sp, ha->srb_mempool);
2044 rval = -EIO;
2045 goto done_free_fcport;
2046 }
2047 return rval;
2049 done_free_fcport:
2050 kfree(fcport);
2052 done_unmap_rsp_sg:
2053 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2054 dma_unmap_sg(&ha->pdev->dev,
2055 bsg_job->reply_payload.sg_list,
2056 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2057 done_unmap_req_sg:
2058 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2059 dma_unmap_sg(&ha->pdev->dev,
2060 bsg_job->request_payload.sg_list,
2061 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2063 done:
2064 return rval;
2065 }
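/*
 * Illustrative sketch, not part of the driver: qlafx00_mgmt_cmd() above
 * expects the qla_mt_iocb_rqst_fx00 block to sit directly behind the vendor
 * opcode, at vendor_cmd[1], with flags telling it which payloads to DMA-map.
 * The helper name is made up; QL_VND_FX00_MGMT_CMD and the field layout are
 * driver-private, and the dataword value here is a placeholder.
 */
static inline void qla_fill_fx_mgmt_rqst(struct fc_bsg_request *bsg_request)
{
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst =
	    (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	bsg_request->msgcode = FC_BSG_HST_VENDOR;
	bsg_request->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_FX00_MGMT_CMD;
	/* map the request payload only; mirrors the flag checks above */
	piocb_rqst->flags = SRB_FXDISC_REQ_DMA_VALID;
	piocb_rqst->dataword = 0;	/* becomes fcport->loop_id above */
}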
2067 static int
2068 qla26xx_serdes_op(struct bsg_job *bsg_job)
2069 {
2070 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2071 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2072 scsi_qla_host_t *vha = shost_priv(host);
2073 int rval = 0;
2074 struct qla_serdes_reg sr;
2076 memset(&sr, 0, sizeof(sr));
2078 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2079 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2081 switch (sr.cmd) {
2082 case INT_SC_SERDES_WRITE_REG:
2083 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2084 bsg_reply->reply_payload_rcv_len = 0;
2085 break;
2086 case INT_SC_SERDES_READ_REG:
2087 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2088 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2089 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2090 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2091 break;
2092 default:
2093 ql_dbg(ql_dbg_user, vha, 0x708c,
2094 "Unknown serdes cmd %x.\n", sr.cmd);
2095 rval = -EINVAL;
2096 break;
2097 }
2099 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2100 rval ? EXT_STATUS_MAILBOX : 0;
2102 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2103 bsg_reply->result = DID_OK << 16;
2104 bsg_job_done(bsg_job, bsg_reply->result,
2105 bsg_reply->reply_payload_rcv_len);
2106 return 0;
2107 }
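/*
 * Illustrative sketch, not part of the driver: the request payload for
 * QL_VND_SERDES_OP is one struct qla_serdes_reg. For a read, the driver
 * copies the same struct back with .val filled in; for a write the reply
 * payload is empty. The register address below is a made-up example.
 */
static inline void qla_serdes_read_example(struct qla_serdes_reg *sr)
{
	memset(sr, 0, sizeof(*sr));
	sr->cmd = INT_SC_SERDES_READ_REG;
	sr->addr = 0x1000;	/* placeholder serdes register address */
	/* send sr as the request payload; read sr back from the reply */
}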
2109 static int
2110 qla8044_serdes_op(struct bsg_job *bsg_job)
2111 {
2112 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2113 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2114 scsi_qla_host_t *vha = shost_priv(host);
2115 int rval = 0;
2116 struct qla_serdes_reg_ex sr;
2118 memset(&sr, 0, sizeof(sr));
2120 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2121 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2123 switch (sr.cmd) {
2124 case INT_SC_SERDES_WRITE_REG:
2125 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2126 bsg_reply->reply_payload_rcv_len = 0;
2127 break;
2128 case INT_SC_SERDES_READ_REG:
2129 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2130 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2131 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2132 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2133 break;
2134 default:
2135 ql_dbg(ql_dbg_user, vha, 0x7020,
2136 "Unknown serdes cmd %x.\n", sr.cmd);
2137 rval = -EINVAL;
2138 break;
2139 }
2141 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2142 rval ? EXT_STATUS_MAILBOX : 0;
2144 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2145 bsg_reply->result = DID_OK << 16;
2146 bsg_job_done(bsg_job, bsg_reply->result,
2147 bsg_reply->reply_payload_rcv_len);
2148 return 0;
2149 }
2151 static int
2152 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2153 {
2154 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2155 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2156 scsi_qla_host_t *vha = shost_priv(host);
2157 struct qla_hw_data *ha = vha->hw;
2158 struct qla_flash_update_caps cap;
2160 if (!(IS_QLA27XX(ha)))
2161 return -EPERM;
2163 memset(&cap, 0, sizeof(cap));
2164 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2165 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2166 (uint64_t)ha->fw_attributes_h << 16 |
2167 (uint64_t)ha->fw_attributes;
2169 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2170 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2171 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2173 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2174 EXT_STATUS_OK;
2176 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2177 bsg_reply->result = DID_OK << 16;
2178 bsg_job_done(bsg_job, bsg_reply->result,
2179 bsg_reply->reply_payload_rcv_len);
2180 return 0;
2181 }
2183 static int
2184 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2185 {
2186 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2187 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2188 scsi_qla_host_t *vha = shost_priv(host);
2189 struct qla_hw_data *ha = vha->hw;
2190 uint64_t online_fw_attr = 0;
2191 struct qla_flash_update_caps cap;
2193 if (!(IS_QLA27XX(ha)))
2194 return -EPERM;
2196 memset(&cap, 0, sizeof(cap));
2197 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2198 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2200 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2201 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2202 (uint64_t)ha->fw_attributes_h << 16 |
2203 (uint64_t)ha->fw_attributes;
2205 if (online_fw_attr != cap.capabilities) {
2206 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2207 EXT_STATUS_INVALID_PARAM;
2208 return -EINVAL;
2209 }
2211 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2212 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2213 EXT_STATUS_INVALID_PARAM;
2214 return -EINVAL;
2215 }
2217 bsg_reply->reply_payload_rcv_len = 0;
2219 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2220 EXT_STATUS_OK;
2222 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2223 bsg_reply->result = DID_OK << 16;
2224 bsg_job_done(bsg_job, bsg_reply->result,
2225 bsg_reply->reply_payload_rcv_len);
2226 return 0;
2227 }
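/*
 * Worked example, not part of the driver: both capability functions above
 * pack the four 16-bit firmware attribute words into one 64-bit value, low
 * word first. This helper (name made up) just restates that layout:
 * qla_cap_word(caps, 0) recovers fw_attributes, qla_cap_word(caps, 3)
 * recovers fw_attributes_ext[1].
 */
static inline uint16_t qla_cap_word(uint64_t caps, unsigned int idx)
{
	/* idx: 0=fw_attributes, 1=fw_attributes_h,
	 * 2=fw_attributes_ext[0], 3=fw_attributes_ext[1]
	 */
	return (uint16_t)(caps >> (16 * idx));
}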
2229 static int
2230 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2231 {
2232 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2233 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2234 scsi_qla_host_t *vha = shost_priv(host);
2235 struct qla_hw_data *ha = vha->hw;
2236 struct qla_bbcr_data bbcr;
2237 uint16_t loop_id, topo, sw_cap;
2238 uint8_t domain, area, al_pa, state;
2239 int rval;
2241 if (!(IS_QLA27XX(ha)))
2242 return -EPERM;
2244 memset(&bbcr, 0, sizeof(bbcr));
2246 if (vha->flags.bbcr_enable)
2247 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2248 else
2249 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2251 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2252 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2253 &area, &domain, &topo, &sw_cap);
2254 if (rval != QLA_SUCCESS) {
2255 bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2256 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2257 bbcr.mbx1 = loop_id;
2258 goto done;
2259 }
2261 state = (vha->bbcr >> 12) & 0x1;
2263 if (state) {
2264 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2265 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2266 } else {
2267 bbcr.state = QLA_BBCR_STATE_ONLINE;
2268 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2269 }
2271 bbcr.configured_bbscn = vha->bbcr & 0xf;
2272 }
2274 done:
2275 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2276 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2277 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2279 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2281 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2282 bsg_reply->result = DID_OK << 16;
2283 bsg_job_done(bsg_job, bsg_reply->result,
2284 bsg_reply->reply_payload_rcv_len);
2285 return 0;
2286 }
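/*
 * Illustrative sketch, not part of the driver: the bit layout of vha->bbcr
 * implied by qla27xx_get_bbcr_data() above. Only these three fields are
 * established by the code; the width of bbcr and the remaining bits are
 * assumptions, as is the helper name.
 */
static inline void qla_bbcr_fields(uint16_t bbcr, uint8_t *state,
    uint8_t *negotiated, uint8_t *configured)
{
	*state = (bbcr >> 12) & 0x1;	/* 1 = offline (login reject) */
	*negotiated = (bbcr >> 8) & 0xf;
	*configured = bbcr & 0xf;
}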
2288 static int
2289 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2290 {
2291 struct fc_bsg_request *bsg_request = bsg_job->request;
2292 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2293 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2294 scsi_qla_host_t *vha = shost_priv(host);
2295 struct qla_hw_data *ha = vha->hw;
2296 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2297 struct link_statistics *stats = NULL;
2298 dma_addr_t stats_dma;
2299 int rval;
2300 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2301 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2303 if (test_bit(UNLOADING, &vha->dpc_flags))
2304 return -ENODEV;
2306 if (unlikely(pci_channel_offline(ha->pdev)))
2307 return -ENODEV;
2309 if (qla2x00_reset_active(vha))
2310 return -EBUSY;
2312 if (!IS_FWI2_CAPABLE(ha))
2313 return -EPERM;
2315 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2316 GFP_KERNEL);
2317 if (!stats) {
2318 ql_log(ql_log_warn, vha, 0x70e2,
2319 "Failed to allocate memory for stats.\n");
2320 return -ENOMEM;
2321 }
2323 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2325 if (rval == QLA_SUCCESS) {
2326 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
2327 (uint8_t *)stats, sizeof(*stats));
2328 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2329 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2330 }
2332 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2333 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2334 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2336 bsg_job->reply_len = sizeof(*bsg_reply);
2337 bsg_reply->result = DID_OK << 16;
2338 bsg_job_done(bsg_job, bsg_reply->result,
2339 bsg_reply->reply_payload_rcv_len);
2341 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2342 stats, stats_dma);
2344 return 0;
2345 }
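/*
 * Illustrative sketch, not part of the driver: as the cmd[] handling above
 * shows, QL_VND_GET_PRIV_STATS_EX carries an extra options word for
 * qla24xx_get_isp_stats() at vendor_cmd[1], while the plain variant always
 * passes 0. The option bits are firmware-defined; the helper name is made up.
 */
static inline void qla_fill_priv_stats_ex(struct fc_bsg_request *bsg_request,
    uint32_t options)
{
	bsg_request->msgcode = FC_BSG_HST_VENDOR;
	bsg_request->rqst_data.h_vendor.vendor_cmd[0] =
	    QL_VND_GET_PRIV_STATS_EX;
	bsg_request->rqst_data.h_vendor.vendor_cmd[1] = options;
}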
2347 static int
2348 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2349 {
2350 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2351 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2352 scsi_qla_host_t *vha = shost_priv(host);
2353 int rval;
2354 struct qla_dport_diag *dd;
2356 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
2357 return -EPERM;
2359 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2360 if (!dd) {
2361 ql_log(ql_log_warn, vha, 0x70db,
2362 "Failed to allocate memory for dport.\n");
2363 return -ENOMEM;
2364 }
2366 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2367 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2369 rval = qla26xx_dport_diagnostics(
2370 vha, dd->buf, sizeof(dd->buf), dd->options);
2371 if (rval == QLA_SUCCESS) {
2372 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2373 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2374 }
2376 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2377 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2378 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2380 bsg_job->reply_len = sizeof(*bsg_reply);
2381 bsg_reply->result = DID_OK << 16;
2382 bsg_job_done(bsg_job, bsg_reply->result,
2383 bsg_reply->reply_payload_rcv_len);
2385 kfree(dd);
2387 return 0;
2388 }
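/*
 * Illustrative sketch, not part of the driver: the request payload for
 * QL_VND_DPORT_DIAGNOSTICS is one struct qla_dport_diag carrying the options
 * word; on success the same struct comes back with dd->buf filled by
 * qla26xx_dport_diagnostics(). The options value is a placeholder and the
 * helper name is made up.
 */
static inline void qla_dport_diag_example(struct qla_dport_diag *dd)
{
	memset(dd, 0, sizeof(*dd));
	dd->options = 0;	/* placeholder; firmware-defined option bits */
	/* send dd as the request payload; read dd back from the reply */
}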
2390 static int
2391 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2392 {
2393 struct fc_bsg_request *bsg_request = bsg_job->request;
2395 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2396 case QL_VND_LOOPBACK:
2397 return qla2x00_process_loopback(bsg_job);
2399 case QL_VND_A84_RESET:
2400 return qla84xx_reset(bsg_job);
2402 case QL_VND_A84_UPDATE_FW:
2403 return qla84xx_updatefw(bsg_job);
2405 case QL_VND_A84_MGMT_CMD:
2406 return qla84xx_mgmt_cmd(bsg_job);
2408 case QL_VND_IIDMA:
2409 return qla24xx_iidma(bsg_job);
2411 case QL_VND_FCP_PRIO_CFG_CMD:
2412 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2414 case QL_VND_READ_FLASH:
2415 return qla2x00_read_optrom(bsg_job);
2417 case QL_VND_UPDATE_FLASH:
2418 return qla2x00_update_optrom(bsg_job);
2420 case QL_VND_SET_FRU_VERSION:
2421 return qla2x00_update_fru_versions(bsg_job);
2423 case QL_VND_READ_FRU_STATUS:
2424 return qla2x00_read_fru_status(bsg_job);
2426 case QL_VND_WRITE_FRU_STATUS:
2427 return qla2x00_write_fru_status(bsg_job);
2429 case QL_VND_WRITE_I2C:
2430 return qla2x00_write_i2c(bsg_job);
2432 case QL_VND_READ_I2C:
2433 return qla2x00_read_i2c(bsg_job);
2435 case QL_VND_DIAG_IO_CMD:
2436 return qla24xx_process_bidir_cmd(bsg_job);
2438 case QL_VND_FX00_MGMT_CMD:
2439 return qlafx00_mgmt_cmd(bsg_job);
2441 case QL_VND_SERDES_OP:
2442 return qla26xx_serdes_op(bsg_job);
2444 case QL_VND_SERDES_OP_EX:
2445 return qla8044_serdes_op(bsg_job);
2447 case QL_VND_GET_FLASH_UPDATE_CAPS:
2448 return qla27xx_get_flash_upd_cap(bsg_job);
2450 case QL_VND_SET_FLASH_UPDATE_CAPS:
2451 return qla27xx_set_flash_upd_cap(bsg_job);
2453 case QL_VND_GET_BBCR_DATA:
2454 return qla27xx_get_bbcr_data(bsg_job);
2456 case QL_VND_GET_PRIV_STATS:
2457 case QL_VND_GET_PRIV_STATS_EX:
2458 return qla2x00_get_priv_stats(bsg_job);
2460 case QL_VND_DPORT_DIAGNOSTICS:
2461 return qla2x00_do_dport_diagnostics(bsg_job);
2463 default:
2464 return -ENOSYS;
2465 }
2466 }
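/*
 * Illustrative userspace sketch, not part of the driver: how a command
 * reaches the dispatcher above. The caller opens the fc_host bsg node
 * (e.g. /dev/bsg/fc_host0, path is an assumption) and issues SG_IO with a
 * struct sg_io_v4 whose request buffer is a struct fc_bsg_request carrying
 * FC_BSG_HST_VENDOR plus one QL_VND_* opcode word (values private to
 * qla_bsg.h, assumed mirrored by the caller). Guarded out because it is
 * userspace code; error handling trimmed for brevity.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>		/* SG_IO */
#include <scsi/scsi_bsg_fc.h>	/* fc_bsg_request, fc_bsg_reply */

static int qla_vendor_cmd(int fd, uint32_t vnd_cmd, void *din,
    uint32_t din_len)
{
	uint8_t rqst[sizeof(struct fc_bsg_request) + 2 * sizeof(uint32_t)];
	struct fc_bsg_request *bsg_request = (struct fc_bsg_request *)rqst;
	struct fc_bsg_reply reply;
	struct sg_io_v4 io;

	memset(rqst, 0, sizeof(rqst));
	memset(&reply, 0, sizeof(reply));
	memset(&io, 0, sizeof(io));

	bsg_request->msgcode = FC_BSG_HST_VENDOR;
	bsg_request->rqst_data.h_vendor.vendor_cmd[0] = vnd_cmd;

	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)rqst;
	io.request_len = sizeof(rqst);
	io.din_xferp = (uintptr_t)din;	/* becomes the reply payload above */
	io.din_xfer_len = din_len;
	io.response = (uintptr_t)&reply;
	io.max_response_len = sizeof(reply);
	io.timeout = 30 * 1000;		/* milliseconds */

	if (ioctl(fd, SG_IO, &io) < 0)
		return -1;
	/* the real status travels in the vendor reply word */
	return (int)reply.reply_data.vendor_reply.vendor_rsp[0];
}
#endif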
2468 int
2469 qla24xx_bsg_request(struct bsg_job *bsg_job)
2470 {
2471 struct fc_bsg_request *bsg_request = bsg_job->request;
2472 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2473 int ret = -EINVAL;
2474 struct fc_rport *rport;
2475 struct Scsi_Host *host;
2476 scsi_qla_host_t *vha;
2478 /* In case no data transferred. */
2479 bsg_reply->reply_payload_rcv_len = 0;
2481 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2482 rport = fc_bsg_to_rport(bsg_job);
2483 host = rport_to_shost(rport);
2484 vha = shost_priv(host);
2485 } else {
2486 host = fc_bsg_to_shost(bsg_job);
2487 vha = shost_priv(host);
2488 }
2490 if (qla2x00_chip_is_down(vha)) {
2491 ql_dbg(ql_dbg_user, vha, 0x709f,
2492 "BSG: ISP abort active/needed -- cmd=%d.\n",
2493 bsg_request->msgcode);
2494 return -EBUSY;
2495 }
2497 ql_dbg(ql_dbg_user, vha, 0x7000,
2498 "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2500 switch (bsg_request->msgcode) {
2501 case FC_BSG_RPT_ELS:
2502 case FC_BSG_HST_ELS_NOLOGIN:
2503 ret = qla2x00_process_els(bsg_job);
2504 break;
2505 case FC_BSG_HST_CT:
2506 ret = qla2x00_process_ct(bsg_job);
2507 break;
2508 case FC_BSG_HST_VENDOR:
2509 ret = qla2x00_process_vendor_specific(bsg_job);
2510 break;
2511 case FC_BSG_HST_ADD_RPORT:
2512 case FC_BSG_HST_DEL_RPORT:
2513 case FC_BSG_RPT_CT:
2514 default:
2515 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2516 break;
2517 }
2518 return ret;
2519 }
2521 int
2522 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2523 {
2524 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2525 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2526 struct qla_hw_data *ha = vha->hw;
2527 srb_t *sp;
2528 int cnt, que;
2529 unsigned long flags;
2530 struct req_que *req;
2532 /* find the bsg job from the active list of commands */
2533 spin_lock_irqsave(&ha->hardware_lock, flags);
2534 for (que = 0; que < ha->max_req_queues; que++) {
2535 req = ha->req_q_map[que];
2536 if (!req)
2537 continue;
2539 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2540 sp = req->outstanding_cmds[cnt];
2541 if (sp) {
2542 if (((sp->type == SRB_CT_CMD) ||
2543 (sp->type == SRB_ELS_CMD_HST) ||
2544 (sp->type == SRB_FXIOCB_BCMD))
2545 && (sp->u.bsg_job == bsg_job)) {
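/* Take the command off the outstanding list so the normal completion
 * path cannot finish this srb again, and drop hardware_lock across the
 * mailbox-based abort_command(), which cannot be issued under the lock.
 */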
2546 req->outstanding_cmds[cnt] = NULL;
2547 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2548 if (ha->isp_ops->abort_command(sp)) {
2549 ql_log(ql_log_warn, vha, 0x7089,
2550 "mbx abort_command "
2551 "failed.\n");
2552 bsg_reply->result = -EIO;
2553 } else {
2554 ql_dbg(ql_dbg_user, vha, 0x708a,
2555 "mbx abort_command "
2556 "success.\n");
2557 bsg_reply->result = 0;
2558 }
2559 spin_lock_irqsave(&ha->hardware_lock, flags);
2560 goto done;
2561 }
2562 }
2563 }
2564 }
2565 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2566 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2567 bsg_reply->result = -ENXIO;
2568 return 0;
2570 done:
2571 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2572 sp->free(sp);
2573 return 0;
2574 }