Revert "tty: hvc: Fix data abort due to race in hvc_open"
[linux/fpc-iii.git] / drivers / scsi / qla2xxx / qla_bsg.c
blob97b51c477972bee59f522a512f6040c522dd3f00
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>
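/*
 * Note: deferred release of a BSG dummy fcport. qla2x00_bsg_sp_free()
 * below queues this worker on ha->wq for the SRB_CT_CMD,
 * SRB_FXIOCB_BCMD and SRB_ELS_CMD_HST cases rather than freeing the
 * fcport directly in the completion path.
 */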
static void qla2xxx_free_fcport_work(struct work_struct *work)
{
    struct fc_port *fcport = container_of(work, typeof(*fcport),
        free_work);

    qla2x00_free_fcport(fcport);
}
/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
    struct bsg_job *bsg_job = sp->u.bsg_job;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;

    bsg_reply->result = res;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);
    sp->free(sp);
}
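/*
 * Release callback for BSG SRBs: undo the scatter/gather DMA mappings
 * set up when the job was queued (per-direction for FX IOCBs, both
 * directions otherwise), hand any dummy fcport to the worker above,
 * and return the SRB to its pool.
 */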
void qla2x00_bsg_sp_free(srb_t *sp)
{
    struct qla_hw_data *ha = sp->vha->hw;
    struct bsg_job *bsg_job = sp->u.bsg_job;
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

    if (sp->type == SRB_FXIOCB_BCMD) {
        piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
            &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
            dma_unmap_sg(&ha->pdev->dev,
                bsg_job->request_payload.sg_list,
                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
            dma_unmap_sg(&ha->pdev->dev,
                bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    } else {
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    }

    if (sp->type == SRB_CT_CMD ||
        sp->type == SRB_FXIOCB_BCMD ||
        sp->type == SRB_ELS_CMD_HST) {
        INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
        queue_work(ha->wq, &sp->fcport->free_work);
    }

    qla2x00_rel_sp(sp);
}
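/*
 * Sanity-check FCP priority configuration data read from flash.
 * Returns 0 if the data is unusable (blank flash or a missing "HQOS"
 * signature) and 1 if it looks valid; with flag == 1 it additionally
 * requires at least one entry tagged FCP_PRIO_ENTRY_TAG_VALID.
 */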
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
    int i, ret, num_valid;
    uint8_t *bcode;
    struct qla_fcp_prio_entry *pri_entry;
    uint32_t *bcode_val_ptr, bcode_val;

    ret = 1;
    num_valid = 0;
    bcode = (uint8_t *)pri_cfg;
    bcode_val_ptr = (uint32_t *)pri_cfg;
    bcode_val = (uint32_t)(*bcode_val_ptr);

    if (bcode_val == 0xFFFFFFFF) {
        /* No FCP Priority config data in flash */
        ql_dbg(ql_dbg_user, vha, 0x7051,
            "No FCP Priority config data.\n");
        return 0;
    }

    if (memcmp(bcode, "HQOS", 4)) {
        /* Invalid FCP priority data header */
        ql_dbg(ql_dbg_user, vha, 0x7052,
            "Invalid FCP Priority data header. bcode=0x%x.\n",
            bcode_val);
        return 0;
    }
    if (flag != 1)
        return ret;

    pri_entry = &pri_cfg->entry[0];
    for (i = 0; i < pri_cfg->num_entries; i++) {
        if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
            num_valid++;
        pri_entry++;
    }

    if (num_valid == 0) {
        /* No valid FCP priority data entries */
        ql_dbg(ql_dbg_user, vha, 0x7053,
            "No valid FCP Priority data entries.\n");
        ret = 0;
    } else {
        /* FCP priority data is valid */
        ql_dbg(ql_dbg_user, vha, 0x7054,
            "Valid FCP priority data. num entries = %d.\n",
            num_valid);
    }

    return ret;
}
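/*
 * Vendor sub-commands carried in vendor_cmd[1]: disable, enable,
 * get-config and set-config of the FCP priority table. Note that only
 * QLFC_FCP_PRIO_SET_CONFIG is accepted while ha->fcp_prio_cfg is still
 * unallocated, since every other operation dereferences that buffer.
 */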
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int ret = 0;
    uint32_t len;
    uint32_t oper;

    if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    /* Get the sub command */
    oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

    /* Only set config is allowed if config memory is not allocated */
    if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    switch (oper) {
    case QLFC_FCP_PRIO_DISABLE:
        if (ha->flags.fcp_prio_enabled) {
            ha->flags.fcp_prio_enabled = 0;
            ha->fcp_prio_cfg->attributes &=
                ~FCP_PRIO_ATTR_ENABLE;
            qla24xx_update_all_fcp_prio(vha);
            bsg_reply->result = DID_OK;
        } else {
            ret = -EINVAL;
            bsg_reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }
        break;

    case QLFC_FCP_PRIO_ENABLE:
        if (!ha->flags.fcp_prio_enabled) {
            if (ha->fcp_prio_cfg) {
                ha->flags.fcp_prio_enabled = 1;
                ha->fcp_prio_cfg->attributes |=
                    FCP_PRIO_ATTR_ENABLE;
                qla24xx_update_all_fcp_prio(vha);
                bsg_reply->result = DID_OK;
            } else {
                ret = -EINVAL;
                bsg_reply->result = (DID_ERROR << 16);
                goto exit_fcp_prio_cfg;
            }
        }
        break;

    case QLFC_FCP_PRIO_GET_CONFIG:
        len = bsg_job->reply_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            ret = -EINVAL;
            bsg_reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }

        bsg_reply->result = DID_OK;
        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
            len);

        break;

    case QLFC_FCP_PRIO_SET_CONFIG:
        len = bsg_job->request_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            bsg_reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            goto exit_fcp_prio_cfg;
        }

        if (!ha->fcp_prio_cfg) {
            ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
            if (!ha->fcp_prio_cfg) {
                ql_log(ql_log_warn, vha, 0x7050,
                    "Unable to allocate memory for fcp prio "
                    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
                bsg_reply->result = (DID_ERROR << 16);
                ret = -ENOMEM;
                goto exit_fcp_prio_cfg;
            }
        }

        memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
            FCP_PRIO_CFG_SIZE);

        /* validate fcp priority data */

        if (!qla24xx_fcp_prio_cfg_valid(vha,
            (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
            bsg_reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            /* If the buffer is invalid, fcp_prio_cfg
             * is of no use.
             */
            vfree(ha->fcp_prio_cfg);
            ha->fcp_prio_cfg = NULL;
            goto exit_fcp_prio_cfg;
        }

        ha->flags.fcp_prio_enabled = 0;
        if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
            ha->flags.fcp_prio_enabled = 1;
        qla24xx_update_all_fcp_prio(vha);
        bsg_reply->result = DID_OK;
        break;
    default:
        ret = -EINVAL;
        break;
    }

exit_fcp_prio_cfg:
    if (!ret)
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    return ret;
}
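/*
 * ELS pass-through. Port-directed requests (FC_BSG_RPT_ELS) reuse the
 * rport's fcport and force a fabric login if needed; host-directed
 * requests (FC_BSG_HST_ELS_NOLOGIN) fabricate a temporary fcport from
 * the destination port ID carried in the request.
 */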
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_rport *rport;
    fc_port_t *fcport = NULL;
    struct Scsi_Host *host;
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    srb_t *sp;
    const char *type;
    int req_sg_cnt, rsp_sg_cnt;
    int rval = (DID_ERROR << 16);
    uint16_t nextlid = 0;

    if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
        rport = fc_bsg_to_rport(bsg_job);
        fcport = *(fc_port_t **) rport->dd_data;
        host = rport_to_shost(rport);
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_RPT_ELS";
    } else {
        host = fc_bsg_to_shost(bsg_job);
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_HST_ELS_NOLOGIN";
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
        rval = -EIO;
        goto done;
    }

    /* pass through is supported only for ISP 4Gb or higher */
    if (!IS_FWI2_CAPABLE(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7001,
            "ELS passthru not supported for ISP23xx based adapters.\n");
        rval = -EPERM;
        goto done;
    }

    /* Multiple SG's are not supported for ELS requests */
    if (bsg_job->request_payload.sg_cnt > 1 ||
        bsg_job->reply_payload.sg_cnt > 1) {
        ql_dbg(ql_dbg_user, vha, 0x7002,
            "Multiple SG's are not supported for ELS requests, "
            "request_sg_cnt=%x reply_sg_cnt=%x.\n",
            bsg_job->request_payload.sg_cnt,
            bsg_job->reply_payload.sg_cnt);
        rval = -EPERM;
        goto done;
    }

    /* ELS request for rport */
    if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
        /* Make sure the rport is logged in;
         * if not, perform a fabric login.
         */
        if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
            ql_dbg(ql_dbg_user, vha, 0x7003,
                "Failed to login port %06X for ELS passthru.\n",
                fcport->d_id.b24);
            rval = -EIO;
            goto done;
        }
    } else {
        /* Allocate a dummy fcport structure, since the functions
         * preparing the IOCB and mailbox command retrieve port-specific
         * information from the fcport structure. For host-based ELS
         * commands no fcport structure is allocated.
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
            rval = -ENOMEM;
            goto done;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->d_id.b.al_pa =
            bsg_request->rqst_data.h_els.port_id[0];
        fcport->d_id.b.area =
            bsg_request->rqst_data.h_els.port_id[1];
        fcport->d_id.b.domain =
            bsg_request->rqst_data.h_els.port_id[2];
        fcport->loop_id =
            (fcport->d_id.b.al_pa == 0xFD) ?
            NPH_FABRIC_CONTROLLER : NPH_F_PORT;
    }

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7008,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
            "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    sp->type =
        (bsg_request->msgcode == FC_BSG_RPT_ELS ?
        SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
    sp->name =
        (bsg_request->msgcode == FC_BSG_RPT_ELS ?
        "bsg_els_rpt" : "bsg_els_hst");
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->done = qla2x00_bsg_job_done;

    ql_dbg(ql_dbg_user, vha, 0x700a,
        "bsg rqst type: %s els type: %x - loop-id=%x "
        "portid=%-2x%02x%02x.\n", type,
        bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x700e,
            "qla2x00_start_sp failed = %d\n", rval);
        qla2x00_rel_sp(sp);
        rval = -EIO;
        goto done_unmap_sg;
    }
    return rval;

done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    goto done_free_fcport;

done_free_fcport:
    if (bsg_request->msgcode == FC_BSG_RPT_ELS)
        qla2x00_free_fcport(fcport);
done:
    return rval;
}
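/*
 * IOCB count for a CT pass-through: the command IOCB carries the first
 * two data segment descriptors and each continuation IOCB carries up
 * to five more. E.g. dsds = 8 gives 1 + (6 / 5) + 1 = 3 IOCBs.
 */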
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 2) {
        iocbs += (dsds - 2) / 5;
        if ((dsds - 2) % 5)
            iocbs++;
    }
    return iocbs;
}
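/*
 * CT pass-through. The destination is selected by the loop ID encoded
 * in bits 31:24 of preamble_word1: 0xFC addresses the SNS (directory
 * server), 0xFA the management server; anything else is rejected.
 */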
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
    srb_t *sp;
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = (DID_ERROR << 16);
    int req_sg_cnt, rsp_sg_cnt;
    uint16_t loop_id;
    struct fc_port *fcport;
    char *type = "FC_BSG_HST_CT";

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x700f,
            "dma_map_sg return %d for request\n", req_sg_cnt);
        rval = -ENOMEM;
        goto done;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7010,
            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
        rval = -ENOMEM;
        goto done;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7011,
            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7012,
            "Host is not online.\n");
        rval = -EIO;
        goto done_unmap_sg;
    }

    loop_id =
        (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
        >> 24;
    switch (loop_id) {
    case 0xFC:
        loop_id = cpu_to_le16(NPH_SNS);
        break;
    case 0xFA:
        loop_id = vha->mgmt_svr_loop_id;
        break;
    default:
        ql_dbg(ql_dbg_user, vha, 0x7013,
            "Unknown loop id: %x.\n", loop_id);
        rval = -EINVAL;
        goto done_unmap_sg;
    }

    /* Allocate a dummy fcport structure, since the functions preparing
     * the IOCB and mailbox command retrieve port-specific information
     * from the fcport structure. For host-based ELS commands no fcport
     * structure is allocated.
     */
    fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (!fcport) {
        ql_log(ql_log_warn, vha, 0x7014,
            "Failed to allocate fcport.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    /* Initialize all required fields of fcport */
    fcport->vha = vha;
    fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
    fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
    fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
    fcport->loop_id = loop_id;

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        ql_log(ql_log_warn, vha, 0x7015,
            "qla2x00_get_sp failed.\n");
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    sp->type = SRB_CT_CMD;
    sp->name = "bsg_ct";
    sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->done = qla2x00_bsg_job_done;

    ql_dbg(ql_dbg_user, vha, 0x7016,
        "bsg rqst type: %s else type: %x - "
        "loop-id=%x portid=%02x%02x%02x.\n", type,
        (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
        fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
        fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7017,
            "qla2x00_start_sp failed=%d.\n", rval);
        qla2x00_rel_sp(sp);
        rval = -EIO;
        goto done_free_fcport;
    }
    return rval;

done_free_fcport:
    qla2x00_free_fcport(fcport);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
    return rval;
}
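/*
 * Note on the wait flags below: 'wait' arms notification of the DCBX
 * completion event and 'wait2' arms the loopback port-up completion;
 * each event is awaited only when its flag is set.
 */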
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
    int ret = 0;
    int rval = 0;
    uint16_t new_config[4];
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
        goto done_reset_internal;

    memset(new_config, 0, sizeof(new_config));
    if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
        ENABLE_INTERNAL_LOOPBACK ||
        (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
        ENABLE_EXTERNAL_LOOPBACK) {
        new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
        ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
            (new_config[0] & INTERNAL_LOOPBACK_MASK));
        memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

        ha->notify_dcbx_comp = wait;
        ha->notify_lb_portup_comp = wait2;

        ret = qla81xx_set_port_config(vha, new_config);
        if (ret != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x7025,
                "Set port config failed.\n");
            ha->notify_dcbx_comp = 0;
            ha->notify_lb_portup_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        }

        /* Wait for DCBX complete event */
        if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
            (DCBX_COMP_TIMEOUT * HZ))) {
            ql_dbg(ql_dbg_user, vha, 0x7026,
                "DCBX completion not received.\n");
            ha->notify_dcbx_comp = 0;
            ha->notify_lb_portup_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        } else
            ql_dbg(ql_dbg_user, vha, 0x7027,
                "DCBX completion received.\n");

        if (wait2 &&
            !wait_for_completion_timeout(&ha->lb_portup_comp,
            (LB_PORTUP_COMP_TIMEOUT * HZ))) {
            ql_dbg(ql_dbg_user, vha, 0x70c5,
                "Port up completion not received.\n");
            ha->notify_lb_portup_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        } else
            ql_dbg(ql_dbg_user, vha, 0x70c6,
                "Port up completion received.\n");

        ha->notify_dcbx_comp = 0;
        ha->notify_lb_portup_comp = 0;
    }

done_reset_internal:
    return rval;
}
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
    int ret = 0;
    int rval = 0;
    unsigned long rem_tmo = 0, current_tmo = 0;
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
        goto done_set_internal;

    if (mode == INTERNAL_LOOPBACK)
        new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
    else if (mode == EXTERNAL_LOOPBACK)
        new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
    ql_dbg(ql_dbg_user, vha, 0x70be,
        "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

    memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

    ha->notify_dcbx_comp = 1;
    ret = qla81xx_set_port_config(vha, new_config);
    if (ret != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7021,
            "set port config failed.\n");
        ha->notify_dcbx_comp = 0;
        rval = -EINVAL;
        goto done_set_internal;
    }

    /* Wait for DCBX complete event */
    current_tmo = DCBX_COMP_TIMEOUT * HZ;
    while (1) {
        rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
            current_tmo);
        if (!ha->idc_extend_tmo || rem_tmo) {
            ha->idc_extend_tmo = 0;
            break;
        }
        current_tmo = ha->idc_extend_tmo * HZ;
        ha->idc_extend_tmo = 0;
    }

    if (!rem_tmo) {
        ql_dbg(ql_dbg_user, vha, 0x7022,
            "DCBX completion not received.\n");
        ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
        /*
         * If the reset of the loopback mode doesn't work, take an
         * FCoE dump and reset the chip.
         */
        if (ret) {
            ha->isp_ops->fw_dump(vha, 0);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        }
        rval = -EINVAL;
    } else {
        if (ha->flags.idc_compl_status) {
            ql_dbg(ql_dbg_user, vha, 0x70c3,
                "Bad status in IDC Completion AEN\n");
            rval = -EINVAL;
            ha->flags.idc_compl_status = 0;
        } else
            ql_dbg(ql_dbg_user, vha, 0x7023,
                "DCBX completion received.\n");
    }

    ha->notify_dcbx_comp = 0;
    ha->idc_extend_tmo = 0;

done_set_internal:
    return rval;
}
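/*
 * Diagnostic echo/loopback. An ECHO mailbox test is used when the
 * topology allows it (fabric with the right options, or an 81xx/83xx
 * class CNA receiving a full-size external-loopback ELS frame);
 * otherwise the port is switched into the requested loopback mode,
 * the loopback test is run, and the original port config is restored.
 */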
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval;
    uint8_t command_sent;
    char *type;
    struct msg_echo_lb elreq;
    uint16_t response[MAILBOX_REGISTER_COUNT];
    uint16_t config[4], new_config[4];
    uint8_t *fw_sts_ptr;
    void *req_data = NULL;
    dma_addr_t req_data_dma;
    uint32_t req_data_len;
    uint8_t *rsp_data = NULL;
    dma_addr_t rsp_data_dma;
    uint32_t rsp_data_len;

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
        return -EIO;
    }

    memset(&elreq, 0, sizeof(elreq));

    elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
        DMA_TO_DEVICE);

    if (!elreq.req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701a,
            "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
        return -ENOMEM;
    }

    elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
        DMA_FROM_DEVICE);

    if (!elreq.rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701b,
            "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
        rval = -ENOMEM;
        goto done_unmap_req_sg;
    }

    if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x701c,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x "
            "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
            bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }
    req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
    req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
        &req_data_dma, GFP_KERNEL);
    if (!req_data) {
        ql_log(ql_log_warn, vha, 0x701d,
            "dma alloc failed for req_data.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
        &rsp_data_dma, GFP_KERNEL);
    if (!rsp_data) {
        ql_log(ql_log_warn, vha, 0x7004,
            "dma alloc failed for rsp_data.\n");
        rval = -ENOMEM;
        goto done_free_dma_req;
    }

    /* Copy the request buffer in req_data now */
    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, req_data, req_data_len);

    elreq.send_dma = req_data_dma;
    elreq.rcv_dma = rsp_data_dma;
    elreq.transfer_size = req_data_len;

    elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
    elreq.iteration_count =
        bsg_request->rqst_data.h_vendor.vendor_cmd[2];

    if (atomic_read(&vha->loop_state) == LOOP_READY &&
        ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
        ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
        get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
        req_data_len == MAX_ELS_FRAME_PAYLOAD &&
        elreq.options == EXTERNAL_LOOPBACK))) {
        type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
        ql_dbg(ql_dbg_user, vha, 0x701e,
            "BSG request type: %s.\n", type);
        command_sent = INT_DEF_LB_ECHO_CMD;
        rval = qla2x00_echo_test(vha, &elreq, response);
    } else {
        if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
            memset(config, 0, sizeof(config));
            memset(new_config, 0, sizeof(new_config));

            if (qla81xx_get_port_config(vha, config)) {
                ql_log(ql_log_warn, vha, 0x701f,
                    "Get port config failed.\n");
                rval = -EPERM;
                goto done_free_dma_rsp;
            }

            if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
                ql_dbg(ql_dbg_user, vha, 0x70c4,
                    "Loopback operation already in "
                    "progress.\n");
                rval = -EAGAIN;
                goto done_free_dma_rsp;
            }

            ql_dbg(ql_dbg_user, vha, 0x70c0,
                "elreq.options=%04x\n", elreq.options);

            if (elreq.options == EXTERNAL_LOOPBACK)
                if (IS_QLA8031(ha) || IS_QLA8044(ha))
                    rval = qla81xx_set_loopback_mode(vha,
                        config, new_config, elreq.options);
                else
                    rval = qla81xx_reset_loopback_mode(vha,
                        config, 1, 0);
            else
                rval = qla81xx_set_loopback_mode(vha, config,
                    new_config, elreq.options);

            if (rval) {
                rval = -EPERM;
                goto done_free_dma_rsp;
            }

            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x7028,
                "BSG request type: %s.\n", type);

            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);

            if (response[0] == MBS_COMMAND_ERROR &&
                response[1] == MBS_LB_RESET) {
                ql_log(ql_log_warn, vha, 0x7029,
                    "MBX command error, Aborting ISP.\n");
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                qla2xxx_wake_dpc(vha);
                qla2x00_wait_for_chip_reset(vha);
                /* Also reset the MPI */
                if (IS_QLA81XX(ha)) {
                    if (qla81xx_restart_mpi_firmware(vha) !=
                        QLA_SUCCESS) {
                        ql_log(ql_log_warn, vha, 0x702a,
                            "MPI reset failed.\n");
                    }
                }

                rval = -EIO;
                goto done_free_dma_rsp;
            }

            if (new_config[0]) {
                int ret;

                /* Revert back to original port config.
                 * Also clear internal loopback.
                 */
                ret = qla81xx_reset_loopback_mode(vha,
                    new_config, 0, 1);
                if (ret) {
                    /*
                     * If the reset of the loopback mode
                     * doesn't work, take an FCoE dump and
                     * then reset the chip.
                     */
                    ha->isp_ops->fw_dump(vha, 0);
                    set_bit(ISP_ABORT_NEEDED,
                        &vha->dpc_flags);
                }
            }

        } else {
            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x702b,
                "BSG request type: %s.\n", type);
            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);
        }
    }

    if (rval) {
        ql_log(ql_log_warn, vha, 0x702c,
            "Vendor request %s failed.\n", type);

        rval = 0;
        bsg_reply->result = (DID_ERROR << 16);
        bsg_reply->reply_payload_rcv_len = 0;
    } else {
        ql_dbg(ql_dbg_user, vha, 0x702d,
            "Vendor request %s completed.\n", type);
        bsg_reply->result = (DID_OK << 16);
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, rsp_data,
            rsp_data_len);
    }

    bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
        sizeof(response) + sizeof(uint8_t);
    fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
    memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
        sizeof(response));
    fw_sts_ptr += sizeof(response);
    *fw_sts_ptr = command_sent;

done_free_dma_rsp:
    dma_free_coherent(&ha->pdev->dev, rsp_data_len,
        rsp_data, rsp_data_dma);
done_free_dma_req:
    dma_free_coherent(&ha->pdev->dev, req_data_len,
        req_data, req_data_dma);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!rval)
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    return rval;
}
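/*
 * ISP84xx chip reset. The second argument to qla84xx_reset_chip()
 * appears to select the diagnostic-firmware reset variant when
 * A84_ISSUE_RESET_DIAG_FW is requested in vendor_cmd[1].
 */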
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint32_t flag;

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

    rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7030,
            "Vendor request 84xx reset failed.\n");
        rval = (DID_ERROR << 16);

    } else {
        ql_dbg(ql_dbg_user, vha, 0x7031,
            "Vendor request 84xx reset completed.\n");
        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    }

    return rval;
}
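/*
 * ISP84xx firmware update: the new image is staged in a coherent DMA
 * buffer and handed to the firmware through a Verify Chip IOCB
 * (VCO_FORCE_UPDATE | VCO_END_OF_DATA), with a 120 second timeout.
 */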
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct verify_chip_entry_84xx *mn = NULL;
    dma_addr_t mn_dma, fw_dma;
    void *fw_buf = NULL;
    int rval = 0;
    uint32_t sg_cnt;
    uint32_t data_len;
    uint16_t options;
    uint32_t flag;
    uint32_t fw_ver;

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7032,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7033,
            "dma_map_sg returned %d for request.\n", sg_cnt);
        return -ENOMEM;
    }

    if (sg_cnt != bsg_job->request_payload.sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7034,
            "DMA mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    data_len = bsg_job->request_payload.payload_len;
    fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
        &fw_dma, GFP_KERNEL);
    if (!fw_buf) {
        ql_log(ql_log_warn, vha, 0x7035,
            "DMA alloc failed for fw_buf.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, fw_buf, data_len);

    mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x7036,
            "DMA alloc failed for fw buffer.\n");
        rval = -ENOMEM;
        goto done_free_fw_buf;
    }

    flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
    fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

    mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
    mn->entry_count = 1;

    options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
    if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
        options |= VCO_DIAG_FW;

    mn->options = cpu_to_le16(options);
    mn->fw_ver = cpu_to_le32(fw_ver);
    mn->fw_size = cpu_to_le32(data_len);
    mn->fw_seq_size = cpu_to_le32(data_len);
    put_unaligned_le64(fw_dma, &mn->dsd.address);
    mn->dsd.length = cpu_to_le32(data_len);
    mn->data_seg_cnt = cpu_to_le16(1);

    rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7037,
            "Vendor request 84xx updatefw failed.\n");

        rval = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x7038,
            "Vendor request 84xx updatefw completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK;
    }

    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
    dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

    if (!rval)
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    return rval;
}
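/*
 * ISP84xx management pass-through (Access Chip IOCB). READ_MEM and
 * GET_INFO map the reply payload for DMA from the device, WRITE_MEM
 * maps the request payload to the device, and CHNG_CONFIG needs no
 * data buffer at all; dma_direction records which unmap is due on exit.
 */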
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct access_chip_84xx *mn = NULL;
    dma_addr_t mn_dma, mgmt_dma;
    void *mgmt_b = NULL;
    int rval = 0;
    struct qla_bsg_a84_mgmt *ql84_mgmt;
    uint32_t sg_cnt;
    uint32_t data_len = 0;
    uint32_t dma_direction = DMA_NONE;

    if (!IS_QLA84XX(ha)) {
        ql_log(ql_log_warn, vha, 0x703a,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x703c,
            "DMA alloc failed for fw buffer.\n");
        return -ENOMEM;
    }

    mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
    mn->entry_count = 1;
    ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
    switch (ql84_mgmt->mgmt.cmd) {
    case QLA84_MGMT_READ_MEM:
    case QLA84_MGMT_GET_INFO:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703d,
                "dma_map_sg returned %d for reply.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_FROM_DEVICE;

        if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703e,
                "DMA mapping resulted in different sg counts, "
                "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
                bsg_job->reply_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->reply_payload.payload_len;

        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x703f,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
            mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
            mn->parameter1 =
                cpu_to_le32(
                ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

        } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
            mn->options = cpu_to_le16(ACO_REQUEST_INFO);
            mn->parameter1 =
                cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

            mn->parameter2 =
                cpu_to_le32(
                ql84_mgmt->mgmt.mgmtp.u.info.context);
        }
        break;

    case QLA84_MGMT_WRITE_MEM:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7040,
                "dma_map_sg returned %d.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_TO_DEVICE;

        if (sg_cnt != bsg_job->request_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7041,
                "DMA mapping resulted in different sg counts, "
                "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
                bsg_job->request_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->request_payload.payload_len;
        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x7042,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

        mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
        break;

    case QLA84_MGMT_CHNG_CONFIG:
        mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

        mn->parameter2 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

        mn->parameter3 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
        break;

    default:
        rval = -EIO;
        goto exit_mgmt;
    }

    if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
        mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
        mn->dseg_count = cpu_to_le16(1);
        put_unaligned_le64(mgmt_dma, &mn->dsd.address);
        mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
    }

    rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7043,
            "Vendor request 84xx mgmt failed.\n");

        rval = (DID_ERROR << 16);

    } else {
        ql_dbg(ql_dbg_user, vha, 0x7044,
            "Vendor request 84xx mgmt completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK;

        if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
            (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
            bsg_reply->reply_payload_rcv_len =
                bsg_job->reply_payload.payload_len;

            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, mgmt_b,
                data_len);
        }
    }

done_unmap_sg:
    if (mgmt_b)
        dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

    if (dma_direction == DMA_TO_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    else if (dma_direction == DMA_FROM_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

    if (!rval)
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    return rval;
}
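/*
 * iiDMA speed control for a logged-in target port identified by WWPN.
 * port_param->mode selects set (non-zero) versus get; on a get, the
 * updated qla_port_param is appended to the bsg reply.
 */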
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    int rval = 0;
    struct qla_port_param *port_param = NULL;
    fc_port_t *fcport = NULL;
    int found = 0;
    uint16_t mb[MAILBOX_REGISTER_COUNT];
    uint8_t *rsp_ptr = NULL;

    if (!IS_IIDMA_CAPABLE(vha->hw)) {
        ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
        return -EINVAL;
    }

    port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
    if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
        ql_log(ql_log_warn, vha, 0x7048,
            "Invalid destination type.\n");
        return -EINVAL;
    }

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->port_type != FCT_TARGET)
            continue;

        if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
            fcport->port_name, sizeof(fcport->port_name)))
            continue;

        found = 1;
        break;
    }

    if (!found) {
        ql_log(ql_log_warn, vha, 0x7049,
            "Failed to find port.\n");
        return -EINVAL;
    }

    if (atomic_read(&fcport->state) != FCS_ONLINE) {
        ql_log(ql_log_warn, vha, 0x704a,
            "Port is not online.\n");
        return -EINVAL;
    }

    if (fcport->flags & FCF_LOGIN_NEEDED) {
        ql_log(ql_log_warn, vha, 0x704b,
            "Remote port not logged in flags = 0x%x.\n", fcport->flags);
        return -EINVAL;
    }

    if (port_param->mode)
        rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
            port_param->speed, mb);
    else
        rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
            &port_param->speed, mb);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x704c,
            "iiDMA cmd failed for %8phN -- "
            "%04x %x %04x %04x.\n", fcport->port_name,
            rval, fcport->fp_speed, mb[0], mb[1]);
        rval = (DID_ERROR << 16);
    } else {
        if (!port_param->mode) {
            bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
                sizeof(struct qla_port_param);

            rsp_ptr = ((uint8_t *)bsg_reply) +
                sizeof(struct fc_bsg_reply);

            memcpy(rsp_ptr, port_param,
                sizeof(struct qla_port_param));
        }

        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
    }

    return rval;
}
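/*
 * Common setup for option ROM reads and updates: validate the flash
 * offset from vendor_cmd[1], clamp the region so start + payload_len
 * never runs past ha->optrom_size, and allocate the staging buffer.
 * The callers below take ha->optrom_mutex before calling this.
 */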
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
    struct fc_bsg_request *bsg_request = bsg_job->request;
    uint32_t start = 0;
    int valid = 0;
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev)))
        return -EINVAL;

    start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
    if (start > ha->optrom_size) {
        ql_log(ql_log_warn, vha, 0x7055,
            "start %d > optrom_size %d.\n", start, ha->optrom_size);
        return -EINVAL;
    }

    if (ha->optrom_state != QLA_SWAITING) {
        ql_log(ql_log_info, vha, 0x7056,
            "optrom_state %d.\n", ha->optrom_state);
        return -EBUSY;
    }

    ha->optrom_region_start = start;
    ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
    if (is_update) {
        if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
            valid = 1;
        else if (start == (ha->flt_region_boot * 4) ||
            start == (ha->flt_region_fw * 4))
            valid = 1;
        else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
            IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
            IS_QLA28XX(ha))
            valid = 1;
        if (!valid) {
            ql_log(ql_log_warn, vha, 0x7058,
                "Invalid start region 0x%x/0x%x.\n", start,
                bsg_job->request_payload.payload_len);
            return -EINVAL;
        }

        ha->optrom_region_size = start +
            bsg_job->request_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->request_payload.payload_len;
        ha->optrom_state = QLA_SWRITING;
    } else {
        ha->optrom_region_size = start +
            bsg_job->reply_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->reply_payload.payload_len;
        ha->optrom_state = QLA_SREADING;
    }

    ha->optrom_buffer = vzalloc(ha->optrom_region_size);
    if (!ha->optrom_buffer) {
        ql_log(ql_log_warn, vha, 0x7059,
            "Read: Unable to allocate memory for optrom retrieval "
            "(%x)\n", ha->optrom_region_size);

        ha->optrom_state = QLA_SWAITING;
        return -ENOMEM;
    }

    return 0;
}
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    if (ha->flags.nic_core_reset_hdlr_active)
        return -EBUSY;

    mutex_lock(&ha->optrom_mutex);
    rval = qla2x00_optrom_setup(bsg_job, vha, 0);
    if (rval) {
        mutex_unlock(&ha->optrom_mutex);
        return rval;
    }

    ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
    bsg_reply->result = DID_OK;
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    mutex_unlock(&ha->optrom_mutex);
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);
    return rval;
}
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    mutex_lock(&ha->optrom_mutex);
    rval = qla2x00_optrom_setup(bsg_job, vha, 1);
    if (rval) {
        mutex_unlock(&ha->optrom_mutex);
        return rval;
    }

    /* Set the isp82xx_no_md_cap not to capture minidump */
    ha->flags.isp82xx_no_md_cap = 1;

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    if (rval) {
        bsg_reply->result = -EINVAL;
        rval = -EINVAL;
    } else {
        bsg_reply->result = DID_OK;
    }
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    mutex_unlock(&ha->optrom_mutex);
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);
    return rval;
}
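/*
 * The FRU and I2C helpers below share a pattern: the caller's request
 * payload is copied into a small on-stack buffer (DMA_POOL_SIZE bytes)
 * overlaid with the vendor structure, the actual transfer goes through
 * a dma_pool buffer via qla2x00_read_sfp()/qla2x00_write_sfp(), and
 * the EXT_STATUS_* result is returned in vendor_rsp[0].
 */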
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_image_version_list *list = (void *)bsg;
    struct qla_image_version *image;
    uint32_t count;
    dma_addr_t sfp_dma;
    void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

    image = list->version;
    count = list->count;
    while (count--) {
        memcpy(sfp, &image->field_info, sizeof(image->field_info));
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            image->field_address.device, image->field_address.offset,
            sizeof(image->field_info), image->field_address.option);
        if (rval) {
            bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                EXT_STATUS_MAILBOX;
            goto dealloc;
        }
        image++;
    }

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_status_reg *sr = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

    rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
        sr->field_address.device, sr->field_address.offset,
        sizeof(sr->status_reg), sr->field_address.option);
    sr->status_reg = *sfp;

    if (rval) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->reply_payload_rcv_len = sizeof(*sr);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_status_reg *sr = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

    *sfp = sr->status_reg;
    rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
        sr->field_address.device, sr->field_address.offset,
        sizeof(sr->status_reg), sr->field_address.option);

    if (rval) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_i2c_access *i2c = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

    memcpy(sfp, i2c->buffer, i2c->length);
    rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
        i2c->device, i2c->offset, i2c->length, i2c->option);

    if (rval) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_i2c_access *i2c = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

    rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
        i2c->device, i2c->offset, i2c->length, i2c->option);

    if (rval) {
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    memcpy(i2c->buffer, sfp, i2c->length);
    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
    bsg_reply->result = DID_OK << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);

    return 0;
}
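/*
 * Bidirectional pass-through diagnostic. Requires an online, fabric
 * attached (ISP_CFG_F), point-to-point port with no reset in progress,
 * performs a one-time self-login (cached in vha->self_login_loop_id),
 * and demands equal request and reply payload lengths. Errors are
 * reported through vendor_rsp[0] with an overall DID_OK result.
 */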
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
    struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    uint32_t rval = EXT_STATUS_OK;
    uint16_t req_sg_cnt = 0;
    uint16_t rsp_sg_cnt = 0;
    uint16_t nextlid = 0;
    uint32_t tot_dsds;
    srb_t *sp = NULL;
    uint32_t req_data_len;
    uint32_t rsp_data_len;

    /* Check the type of the adapter */
    if (!IS_BIDI_CAPABLE(ha)) {
        ql_log(ql_log_warn, vha, 0x70a0,
            "This adapter is not supported\n");
        rval = EXT_STATUS_NOT_SUPPORTED;
        goto done;
    }

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
        rval = EXT_STATUS_BUSY;
        goto done;
    }

    /* Check if host is online */
    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x70a1,
            "Host is not online\n");
        rval = EXT_STATUS_DEVICE_OFFLINE;
        goto done;
    }

    /* Check if cable is plugged in or not */
    if (vha->device_flags & DFLG_NO_CABLE) {
        ql_log(ql_log_warn, vha, 0x70a2,
            "Cable is unplugged...\n");
        rval = EXT_STATUS_INVALID_CFG;
        goto done;
    }

    /* Check if the switch is connected or not */
    if (ha->current_topology != ISP_CFG_F) {
        ql_log(ql_log_warn, vha, 0x70a3,
            "Host is not connected to the switch\n");
        rval = EXT_STATUS_INVALID_CFG;
        goto done;
    }

    /* Check if operating mode is P2P */
    if (ha->operating_mode != P2P) {
        ql_log(ql_log_warn, vha, 0x70a4,
            "Host operating mode is not P2p\n");
        rval = EXT_STATUS_INVALID_CFG;
        goto done;
    }

    mutex_lock(&ha->selflogin_lock);
    if (vha->self_login_loop_id == 0) {
        /* Initialize all required fields of fcport */
        vha->bidir_fcport.vha = vha;
        vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
        vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
        vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
        vha->bidir_fcport.loop_id = vha->loop_id;

        if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
            ql_log(ql_log_warn, vha, 0x70a7,
                "Failed to login port %06X for bidirectional IOCB\n",
                vha->bidir_fcport.d_id.b24);
            mutex_unlock(&ha->selflogin_lock);
            rval = EXT_STATUS_MAILBOX;
            goto done;
        }
        vha->self_login_loop_id = nextlid - 1;

    }
    /* Assign the self login loop id to fcport */
    mutex_unlock(&ha->selflogin_lock);

    vha->bidir_fcport.loop_id = vha->self_login_loop_id;

    req_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt,
        DMA_TO_DEVICE);

    if (!req_sg_cnt) {
        rval = EXT_STATUS_NO_MEMORY;
        goto done;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
        DMA_FROM_DEVICE);

    if (!rsp_sg_cnt) {
        rval = EXT_STATUS_NO_MEMORY;
        goto done_unmap_req_sg;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_dbg(ql_dbg_user, vha, 0x70a9,
            "Dma mapping resulted in different sg counts "
            "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
            "%x dma_reply_sg_cnt: %x]\n",
            bsg_job->request_payload.sg_cnt, req_sg_cnt,
            bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = EXT_STATUS_NO_MEMORY;
        goto done_unmap_sg;
    }

    req_data_len = bsg_job->request_payload.payload_len;
    rsp_data_len = bsg_job->reply_payload.payload_len;

    if (req_data_len != rsp_data_len) {
        rval = EXT_STATUS_BUSY;
        ql_log(ql_log_warn, vha, 0x70aa,
            "req_data_len != rsp_data_len\n");
        goto done_unmap_sg;
    }

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
    if (!sp) {
        ql_dbg(ql_dbg_user, vha, 0x70ac,
            "Alloc SRB structure failed\n");
        rval = EXT_STATUS_NO_MEMORY;
        goto done_unmap_sg;
    }

    /* Populate srb->ctx with bidir ctx */
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->type = SRB_BIDI_CMD;
    sp->done = qla2x00_bsg_job_done;

    /* Add the read and write sg count */
    tot_dsds = rsp_sg_cnt + req_sg_cnt;

    rval = qla2x00_start_bidir(sp, vha, tot_dsds);
    if (rval != EXT_STATUS_OK)
        goto done_free_srb;
    /* the bsg request will be completed in the interrupt handler */
    return rval;

done_free_srb:
    mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

    /* Return an error vendor specific response
     * and complete the bsg request
     */
    bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->reply_payload_rcv_len = 0;
    bsg_reply->result = (DID_OK) << 16;
    bsg_job_done(bsg_job, bsg_reply->result,
        bsg_reply->reply_payload_rcv_len);
    /* Always return success, vendor rsp carries correct status */
    return 0;
}
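/*
 * ISPFx00 management pass-through: the IOCB request
 * (qla_mt_iocb_rqst_fx00) arrives embedded at vendor_cmd[1], and its
 * flags decide which payload directions actually need DMA mapping.
 */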
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = (DID_ERROR << 16);
        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
        srb_t *sp;
        int req_sg_cnt = 0, rsp_sg_cnt = 0;
        struct fc_port *fcport;
        char *type = "FC_BSG_HST_FX_MGMT";

        /* Copy the IOCB specific information */
        piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
            &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        /* Dump the vendor information */
        ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
            piocb_rqst, sizeof(*piocb_rqst));

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x70d0,
                    "Host is not online.\n");
                rval = -EIO;
                goto done;
        }

        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
                req_sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
                if (!req_sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x70c7,
                            "dma_map_sg return %d for request\n", req_sg_cnt);
                        rval = -ENOMEM;
                        goto done;
                }
        }

        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
                rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
                if (!rsp_sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x70c8,
                            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
                        rval = -ENOMEM;
                        goto done_unmap_req_sg;
                }
        }

        ql_dbg(ql_dbg_user, vha, 0x70c9,
            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

        /* Allocate a dummy fcport structure, since the functions preparing
         * the IOCB and mailbox command retrieve port-specific information
         * from the fcport structure. For host-based ELS commands there is
         * no fcport structure allocated.
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
                ql_log(ql_log_warn, vha, 0x70ca,
                    "Failed to allocate fcport.\n");
                rval = -ENOMEM;
                goto done_unmap_rsp_sg;
        }

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp) {
                ql_log(ql_log_warn, vha, 0x70cb,
                    "qla2x00_get_sp failed.\n");
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->loop_id = piocb_rqst->dataword;

        sp->type = SRB_FXIOCB_BCMD;
        sp->name = "bsg_fx_mgmt";
        sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->done = qla2x00_bsg_job_done;

        ql_dbg(ql_dbg_user, vha, 0x70cc,
            "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
            type, piocb_rqst->func_type, fcport->loop_id);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x70cd,
                    "qla2x00_start_sp failed=%d.\n", rval);
                mempool_free(sp, ha->srb_mempool);
                rval = -EIO;
                goto done_free_fcport;
        }
        return rval;

done_free_fcport:
        qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
                dma_unmap_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
                dma_unmap_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
        return rval;
}
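
/*
 * Read or write a single SerDes register through the mailbox helpers,
 * echoing the register struct back in the reply payload on reads.
 */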
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_serdes_reg sr;

        memset(&sr, 0, sizeof(sr));

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

        switch (sr.cmd) {
        case INT_SC_SERDES_WRITE_REG:
                rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
                bsg_reply->reply_payload_rcv_len = 0;
                break;
        case INT_SC_SERDES_READ_REG:
                rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
                bsg_reply->reply_payload_rcv_len = sizeof(sr);
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x708c,
                    "Unknown serdes cmd %x.\n", sr.cmd);
                rval = -EINVAL;
                break;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : 0;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
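
/*
 * ISP8044 variant of the SerDes vendor command, using the extended
 * register struct (qla_serdes_reg_ex) and the qla8044_* mailbox helpers.
 */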
static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_serdes_reg_ex sr;

        memset(&sr, 0, sizeof(sr));

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

        switch (sr.cmd) {
        case INT_SC_SERDES_WRITE_REG:
                rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
                bsg_reply->reply_payload_rcv_len = 0;
                break;
        case INT_SC_SERDES_READ_REG:
                rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
                bsg_reply->reply_payload_rcv_len = sizeof(sr);
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x7020,
                    "Unknown serdes cmd %x.\n", sr.cmd);
                rval = -EINVAL;
                break;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : 0;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
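
/*
 * Report the flash-update capabilities of the running firmware: the four
 * 16-bit fw_attributes* fields packed into one 64-bit capability word.
 */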
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct qla_flash_update_caps cap;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&cap, 0, sizeof(cap));
        cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
                           (uint64_t)ha->fw_attributes_ext[0] << 32 |
                           (uint64_t)ha->fw_attributes_h << 16 |
                           (uint64_t)ha->fw_attributes;

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
        bsg_reply->reply_payload_rcv_len = sizeof(cap);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
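
/*
 * Accept a flash-update capability request only if it matches the online
 * firmware's capability word and declares an outage budget of at least
 * MAX_LOOP_TIMEOUT; anything else is rejected as EXT_STATUS_INVALID_PARAM.
 */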
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        uint64_t online_fw_attr = 0;
        struct qla_flash_update_caps cap;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&cap, 0, sizeof(cap));
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

        online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
                         (uint64_t)ha->fw_attributes_ext[0] << 32 |
                         (uint64_t)ha->fw_attributes_h << 16 |
                         (uint64_t)ha->fw_attributes;

        if (online_fw_attr != cap.capabilities) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_INVALID_PARAM;
                return -EINVAL;
        }

        if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_INVALID_PARAM;
                return -EINVAL;
        }

        bsg_reply->reply_payload_rcv_len = 0;

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
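
/*
 * Return buffer-to-buffer credit recovery (BBCR) status: enabled/disabled,
 * online/offline state, and the negotiated and configured BBSCN values
 * decoded from vha->bbcr.
 */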
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct qla_bbcr_data bbcr;
        uint16_t loop_id, topo, sw_cap;
        uint8_t domain, area, al_pa, state;
        int rval;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&bbcr, 0, sizeof(bbcr));

        if (vha->flags.bbcr_enable)
                bbcr.status = QLA_BBCR_STATUS_ENABLED;
        else
                bbcr.status = QLA_BBCR_STATUS_DISABLED;

        if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
                rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
                    &area, &domain, &topo, &sw_cap);
                if (rval != QLA_SUCCESS) {
                        bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
                        bbcr.state = QLA_BBCR_STATE_OFFLINE;
                        bbcr.mbx1 = loop_id;
                        goto done;
                }

                state = (vha->bbcr >> 12) & 0x1;

                if (state) {
                        bbcr.state = QLA_BBCR_STATE_OFFLINE;
                        bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
                } else {
                        bbcr.state = QLA_BBCR_STATE_ONLINE;
                        bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
                }

                bbcr.configured_bbscn = vha->bbcr & 0xf;
        }

done:
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
        bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
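
/*
 * Fetch ISP link statistics into a DMA-coherent buffer and copy them to
 * the reply payload. QL_VND_GET_PRIV_STATS_EX additionally passes option
 * flags to the stats mailbox command in vendor_cmd[1].
 */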
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct link_statistics *stats = NULL;
        dma_addr_t stats_dma;
        int rval;
        uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
        uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

        if (test_bit(UNLOADING, &vha->dpc_flags))
                return -ENODEV;

        if (unlikely(pci_channel_offline(ha->pdev)))
                return -ENODEV;

        if (qla2x00_reset_active(vha))
                return -EBUSY;

        if (!IS_FWI2_CAPABLE(ha))
                return -EPERM;

        stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
            GFP_KERNEL);
        if (!stats) {
                ql_log(ql_log_warn, vha, 0x70e2,
                    "Failed to allocate memory for stats.\n");
                return -ENOMEM;
        }

        rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

        if (rval == QLA_SUCCESS) {
                ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
                    stats, sizeof(*stats));
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
        }

        bsg_reply->reply_payload_rcv_len = sizeof(*stats);
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(*bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
            stats, stats_dma);

        return 0;
}
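
/*
 * Run D_Port diagnostics (ISP83xx/27xx/28xx only) with the caller-supplied
 * options and hand the raw result buffer back in the reply payload.
 */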
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval;
        struct qla_dport_diag *dd;

        if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
            !IS_QLA28XX(vha->hw))
                return -EPERM;

        dd = kmalloc(sizeof(*dd), GFP_KERNEL);
        if (!dd) {
                ql_log(ql_log_warn, vha, 0x70db,
                    "Failed to allocate memory for dport.\n");
                return -ENOMEM;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

        rval = qla26xx_dport_diagnostics(
            vha, dd->buf, sizeof(dd->buf), dd->options);
        if (rval == QLA_SUCCESS) {
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
        }

        bsg_reply->reply_payload_rcv_len = sizeof(*dd);
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(*bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        kfree(dd);

        return 0;
}
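
/*
 * Report which flash regions are currently active: the global firmware
 * image, plus the ISP28xx auxiliary regions (board config, VPD/NVRAM and
 * the two NPIV config regions).
 */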
static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct qla_hw_data *ha = vha->hw;
        struct qla_active_regions regions = { };
        struct active_regions active_regions = { };

        qla27xx_get_active_image(vha, &active_regions);
        regions.global_image = active_regions.global;

        if (IS_QLA28XX(ha)) {
                qla28xx_get_aux_images(vha, &active_regions);
                regions.board_config = active_regions.aux.board_config;
                regions.vpd_nvram = active_regions.aux.vpd_nvram;
                regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
                regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
        }

        ql_dbg(ql_dbg_user, vha, 0x70e1,
            "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
            __func__, vha->host_no, regions.global_image,
            regions.board_config, regions.vpd_nvram,
            regions.npiv_config_0_1, regions.npiv_config_2_3);

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
        bsg_reply->reply_payload_rcv_len = sizeof(regions);
        bsg_reply->result = DID_OK << 16;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
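
/*
 * Dispatch a FC_BSG_HST_VENDOR request to its handler by the QL_VND_*
 * opcode carried in vendor_cmd[0].
 */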
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;

        switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
        case QL_VND_LOOPBACK:
                return qla2x00_process_loopback(bsg_job);

        case QL_VND_A84_RESET:
                return qla84xx_reset(bsg_job);

        case QL_VND_A84_UPDATE_FW:
                return qla84xx_updatefw(bsg_job);

        case QL_VND_A84_MGMT_CMD:
                return qla84xx_mgmt_cmd(bsg_job);

        case QL_VND_IIDMA:
                return qla24xx_iidma(bsg_job);

        case QL_VND_FCP_PRIO_CFG_CMD:
                return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

        case QL_VND_READ_FLASH:
                return qla2x00_read_optrom(bsg_job);

        case QL_VND_UPDATE_FLASH:
                return qla2x00_update_optrom(bsg_job);

        case QL_VND_SET_FRU_VERSION:
                return qla2x00_update_fru_versions(bsg_job);

        case QL_VND_READ_FRU_STATUS:
                return qla2x00_read_fru_status(bsg_job);

        case QL_VND_WRITE_FRU_STATUS:
                return qla2x00_write_fru_status(bsg_job);

        case QL_VND_WRITE_I2C:
                return qla2x00_write_i2c(bsg_job);

        case QL_VND_READ_I2C:
                return qla2x00_read_i2c(bsg_job);

        case QL_VND_DIAG_IO_CMD:
                return qla24xx_process_bidir_cmd(bsg_job);

        case QL_VND_FX00_MGMT_CMD:
                return qlafx00_mgmt_cmd(bsg_job);

        case QL_VND_SERDES_OP:
                return qla26xx_serdes_op(bsg_job);

        case QL_VND_SERDES_OP_EX:
                return qla8044_serdes_op(bsg_job);

        case QL_VND_GET_FLASH_UPDATE_CAPS:
                return qla27xx_get_flash_upd_cap(bsg_job);

        case QL_VND_SET_FLASH_UPDATE_CAPS:
                return qla27xx_set_flash_upd_cap(bsg_job);

        case QL_VND_GET_BBCR_DATA:
                return qla27xx_get_bbcr_data(bsg_job);

        case QL_VND_GET_PRIV_STATS:
        case QL_VND_GET_PRIV_STATS_EX:
                return qla2x00_get_priv_stats(bsg_job);

        case QL_VND_DPORT_DIAGNOSTICS:
                return qla2x00_do_dport_diagnostics(bsg_job);

        case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
                return qla2x00_get_flash_image_status(bsg_job);

        default:
                return -ENOSYS;
        }
}
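
/*
 * Main bsg entry point, wired up through the driver's FC transport
 * template: routes ELS, CT and vendor-specific requests to their handlers
 * once the chip is known to be up.
 */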
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        int ret = -EINVAL;
        struct fc_rport *rport;
        struct Scsi_Host *host;
        scsi_qla_host_t *vha;

        /* In case no data transferred. */
        bsg_reply->reply_payload_rcv_len = 0;

        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
                rport = fc_bsg_to_rport(bsg_job);
                host = rport_to_shost(rport);
                vha = shost_priv(host);
        } else {
                host = fc_bsg_to_shost(bsg_job);
                vha = shost_priv(host);
        }

        if (qla2x00_chip_is_down(vha)) {
                ql_dbg(ql_dbg_user, vha, 0x709f,
                    "BSG: ISP abort active/needed -- cmd=%d.\n",
                    bsg_request->msgcode);
                return -EBUSY;
        }

        ql_dbg(ql_dbg_user, vha, 0x7000,
            "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

        switch (bsg_request->msgcode) {
        case FC_BSG_RPT_ELS:
        case FC_BSG_HST_ELS_NOLOGIN:
                ret = qla2x00_process_els(bsg_job);
                break;
        case FC_BSG_HST_CT:
                ret = qla2x00_process_ct(bsg_job);
                break;
        case FC_BSG_HST_VENDOR:
                ret = qla2x00_process_vendor_specific(bsg_job);
                break;
        case FC_BSG_HST_ADD_RPORT:
        case FC_BSG_HST_DEL_RPORT:
        case FC_BSG_RPT_CT:
        default:
                ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
                break;
        }
        return ret;
}
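
/*
 * bsg timeout handler: locate the SRB that owns the timed-out job in the
 * outstanding-command arrays (under hardware_lock), pull it off the queue
 * and ask the firmware to abort it.
 */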
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct qla_hw_data *ha = vha->hw;
        srb_t *sp;
        int cnt, que;
        unsigned long flags;
        struct req_que *req;

        /* Find the bsg job from the active list of commands */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (que = 0; que < ha->max_req_queues; que++) {
                req = ha->req_q_map[que];
                if (!req)
                        continue;

                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
                        if (sp &&
                            (sp->type == SRB_CT_CMD ||
                             sp->type == SRB_ELS_CMD_HST ||
                             sp->type == SRB_FXIOCB_BCMD) &&
                            sp->u.bsg_job == bsg_job) {
                                req->outstanding_cmds[cnt] = NULL;
                                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                                if (ha->isp_ops->abort_command(sp)) {
                                        ql_log(ql_log_warn, vha, 0x7089,
                                            "mbx abort_command failed.\n");
                                        bsg_reply->result = -EIO;
                                } else {
                                        ql_dbg(ql_dbg_user, vha, 0x708a,
                                            "mbx abort_command success.\n");
                                        bsg_reply->result = 0;
                                }
                                spin_lock_irqsave(&ha->hardware_lock, flags);
                                goto done;
                        }
                }
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
        bsg_reply->result = -ENXIO;
        return 0;

done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        sp->free(sp);
        return 0;
}