Linux 4.19.133
[linux/fpc-iii.git] / drivers / scsi / qla2xxx / qla_bsg.c
blob 47f062e96e62c077fc80d73c6bf01cbe9a469130
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>
/* BSG support for ELS/CT pass through */
void
qla2x00_bsg_job_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct bsg_job *bsg_job = sp->u.bsg_job;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;

        bsg_reply->result = res;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        sp->free(sp);
}
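/*
 * Note: qla2x00_bsg_job_done() is installed as sp->done on the BSG SRBs
 * built below, so it normally runs from the driver's command-completion
 * path; it posts the result to the block layer and then releases the
 * SRB through sp->free.
 */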
void
qla2x00_bsg_sp_free(void *ptr)
{
        srb_t *sp = ptr;
        struct qla_hw_data *ha = sp->vha->hw;
        struct bsg_job *bsg_job = sp->u.bsg_job;
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

        if (sp->type == SRB_FXIOCB_BCMD) {
                piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
                    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

                if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
                        dma_unmap_sg(&ha->pdev->dev,
                            bsg_job->request_payload.sg_list,
                            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

                if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
                        dma_unmap_sg(&ha->pdev->dev,
                            bsg_job->reply_payload.sg_list,
                            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        } else {
                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

                dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        }

        if (sp->type == SRB_CT_CMD ||
            sp->type == SRB_FXIOCB_BCMD ||
            sp->type == SRB_ELS_CMD_HST)
                kfree(sp->fcport);
        qla2x00_rel_sp(sp);
}
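/*
 * kfree(sp->fcport) above is limited to SRB types whose submit paths
 * allocate a dummy fcport (CT, host-based ELS and FX IOCB commands);
 * rport-based ELS reuses the rport's own fcport, which must not be
 * freed here.
 */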
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
        int i, ret, num_valid;
        uint8_t *bcode;
        struct qla_fcp_prio_entry *pri_entry;
        uint32_t *bcode_val_ptr, bcode_val;

        ret = 1;
        num_valid = 0;
        bcode = (uint8_t *)pri_cfg;
        bcode_val_ptr = (uint32_t *)pri_cfg;
        bcode_val = (uint32_t)(*bcode_val_ptr);

        if (bcode_val == 0xFFFFFFFF) {
                /* No FCP Priority config data in flash */
                ql_dbg(ql_dbg_user, vha, 0x7051,
                    "No FCP Priority config data.\n");
                return 0;
        }

        if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
            bcode[3] != 'S') {
                /* Invalid FCP priority data header */
                ql_dbg(ql_dbg_user, vha, 0x7052,
                    "Invalid FCP Priority data header. bcode=0x%x.\n",
                    bcode_val);
                return 0;
        }
        if (flag != 1)
                return ret;

        pri_entry = &pri_cfg->entry[0];
        for (i = 0; i < pri_cfg->num_entries; i++) {
                if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
                        num_valid++;
                pri_entry++;
        }

        if (num_valid == 0) {
                /* No valid FCP priority data entries */
                ql_dbg(ql_dbg_user, vha, 0x7053,
                    "No valid FCP Priority data entries.\n");
                ret = 0;
        } else {
                /* FCP priority data is valid */
                ql_dbg(ql_dbg_user, vha, 0x7054,
                    "Valid FCP priority data. num entries = %d.\n",
                    num_valid);
        }

        return ret;
}
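/*
 * Return contract: 0 means the buffer holds no usable FCP priority data
 * (erased flash, a bad "HQOS" signature, or no tagged entries when
 * flag == 1); 1 means the configuration can be used as-is.
 */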
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int ret = 0;
        uint32_t len;
        uint32_t oper;

        if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
                ret = -EINVAL;
                goto exit_fcp_prio_cfg;
        }

        /* Get the sub command */
        oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        /* Only set config is allowed if config memory is not allocated */
        if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
                ret = -EINVAL;
                goto exit_fcp_prio_cfg;
        }

        switch (oper) {
        case QLFC_FCP_PRIO_DISABLE:
                if (ha->flags.fcp_prio_enabled) {
                        ha->flags.fcp_prio_enabled = 0;
                        ha->fcp_prio_cfg->attributes &=
                            ~FCP_PRIO_ATTR_ENABLE;
                        qla24xx_update_all_fcp_prio(vha);
                        bsg_reply->result = DID_OK;
                } else {
                        ret = -EINVAL;
                        bsg_reply->result = (DID_ERROR << 16);
                        goto exit_fcp_prio_cfg;
                }
                break;

        case QLFC_FCP_PRIO_ENABLE:
                if (!ha->flags.fcp_prio_enabled) {
                        if (ha->fcp_prio_cfg) {
                                ha->flags.fcp_prio_enabled = 1;
                                ha->fcp_prio_cfg->attributes |=
                                    FCP_PRIO_ATTR_ENABLE;
                                qla24xx_update_all_fcp_prio(vha);
                                bsg_reply->result = DID_OK;
                        } else {
                                ret = -EINVAL;
                                bsg_reply->result = (DID_ERROR << 16);
                                goto exit_fcp_prio_cfg;
                        }
                }
                break;

        case QLFC_FCP_PRIO_GET_CONFIG:
                len = bsg_job->reply_payload.payload_len;
                if (!len || len > FCP_PRIO_CFG_SIZE) {
                        ret = -EINVAL;
                        bsg_reply->result = (DID_ERROR << 16);
                        goto exit_fcp_prio_cfg;
                }

                bsg_reply->result = DID_OK;
                bsg_reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(
                        bsg_job->reply_payload.sg_list,
                        bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
                        len);

                break;

        case QLFC_FCP_PRIO_SET_CONFIG:
                len = bsg_job->request_payload.payload_len;
                if (!len || len > FCP_PRIO_CFG_SIZE) {
                        bsg_reply->result = (DID_ERROR << 16);
                        ret = -EINVAL;
                        goto exit_fcp_prio_cfg;
                }

                if (!ha->fcp_prio_cfg) {
                        ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
                        if (!ha->fcp_prio_cfg) {
                                ql_log(ql_log_warn, vha, 0x7050,
                                    "Unable to allocate memory for fcp prio "
                                    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
                                bsg_reply->result = (DID_ERROR << 16);
                                ret = -ENOMEM;
                                goto exit_fcp_prio_cfg;
                        }
                }

                memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
                sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
                    FCP_PRIO_CFG_SIZE);

                /* validate fcp priority data */
                if (!qla24xx_fcp_prio_cfg_valid(vha,
                    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
                        bsg_reply->result = (DID_ERROR << 16);
                        ret = -EINVAL;
                        /* If the data is invalid, the allocated
                         * fcp_prio_cfg is of no use.
                         */
                        vfree(ha->fcp_prio_cfg);
                        ha->fcp_prio_cfg = NULL;
                        goto exit_fcp_prio_cfg;
                }

                ha->flags.fcp_prio_enabled = 0;
                if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
                        ha->flags.fcp_prio_enabled = 1;
                qla24xx_update_all_fcp_prio(vha);
                bsg_reply->result = DID_OK;
                break;
        default:
                ret = -EINVAL;
                break;
        }

exit_fcp_prio_cfg:
        if (!ret)
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        return ret;
}
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_rport *rport;
        fc_port_t *fcport = NULL;
        struct Scsi_Host *host;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        srb_t *sp;
        const char *type;
        int req_sg_cnt, rsp_sg_cnt;
        int rval = (DID_ERROR << 16);
        uint16_t nextlid = 0;

        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
                rport = fc_bsg_to_rport(bsg_job);
                fcport = *(fc_port_t **) rport->dd_data;
                host = rport_to_shost(rport);
                vha = shost_priv(host);
                ha = vha->hw;
                type = "FC_BSG_RPT_ELS";
        } else {
                host = fc_bsg_to_shost(bsg_job);
                vha = shost_priv(host);
                ha = vha->hw;
                type = "FC_BSG_HST_ELS_NOLOGIN";
        }

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
                rval = -EIO;
                goto done;
        }

        /* pass through is supported only for ISP 4Gb or higher */
        if (!IS_FWI2_CAPABLE(ha)) {
                ql_dbg(ql_dbg_user, vha, 0x7001,
                    "ELS passthru not supported for ISP23xx based adapters.\n");
                rval = -EPERM;
                goto done;
        }

        /* Multiple SG's are not supported for ELS requests */
        if (bsg_job->request_payload.sg_cnt > 1 ||
            bsg_job->reply_payload.sg_cnt > 1) {
                ql_dbg(ql_dbg_user, vha, 0x7002,
                    "Multiple SG's are not supported for ELS requests, "
                    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
                    bsg_job->request_payload.sg_cnt,
                    bsg_job->reply_payload.sg_cnt);
                rval = -EPERM;
                goto done;
        }

        /* ELS request for rport */
        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
                /* make sure the rport is logged in,
                 * if not perform fabric login
                 */
                if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
                        ql_dbg(ql_dbg_user, vha, 0x7003,
                            "Failed to login port %06X for ELS passthru.\n",
                            fcport->d_id.b24);
                        rval = -EIO;
                        goto done;
                }
        } else {
                /* Allocate a dummy fcport structure, since the functions
                 * preparing the IOCB and mailbox command retrieve port
                 * specific information from the fcport structure. For host
                 * based ELS commands there is no fcport structure allocated.
                 */
                fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
                if (!fcport) {
                        rval = -ENOMEM;
                        goto done;
                }

                /* Initialize all required fields of fcport */
                fcport->vha = vha;
                fcport->d_id.b.al_pa =
                    bsg_request->rqst_data.h_els.port_id[0];
                fcport->d_id.b.area =
                    bsg_request->rqst_data.h_els.port_id[1];
                fcport->d_id.b.domain =
                    bsg_request->rqst_data.h_els.port_id[2];
                fcport->loop_id =
                    (fcport->d_id.b.al_pa == 0xFD) ?
                    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
        }

        req_sg_cnt =
            dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!req_sg_cnt) {
                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!rsp_sg_cnt) {
                dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
            (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
                ql_log(ql_log_warn, vha, 0x7008,
                    "dma mapping resulted in different sg counts, "
                    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
                    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
                    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
                rval = -EAGAIN;
                goto done_unmap_sg;
        }

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp) {
                rval = -ENOMEM;
                goto done_unmap_sg;
        }

        sp->type =
            (bsg_request->msgcode == FC_BSG_RPT_ELS ?
             SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
        sp->name =
            (bsg_request->msgcode == FC_BSG_RPT_ELS ?
             "bsg_els_rpt" : "bsg_els_hst");
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->done = qla2x00_bsg_job_done;

        ql_dbg(ql_dbg_user, vha, 0x700a,
            "bsg rqst type: %s els type: %x - loop-id=%x "
            "portid=%-2x%02x%02x.\n", type,
            bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                qla2x00_rel_sp(sp);
                rval = -EIO;
                goto done_unmap_sg;
        }
        return rval;

done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        goto done_free_fcport;

done_free_fcport:
        /* Only the dummy fcport allocated for host-based ELS is ours to
         * free; for rport-based ELS the fcport belongs to the rport.
         */
        if (bsg_request->msgcode != FC_BSG_RPT_ELS)
                kfree(fcport);
done:
        return rval;
}
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return iocbs;
}
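/*
 * qla24xx_calc_ct_iocbs() appears to assume the command-type IOCB holds
 * two data segment descriptors and each continuation IOCB holds five.
 * Worked examples: dsds = 2 -> 1 IOCB; dsds = 7 -> 1 + 5/5 = 2 IOCBs;
 * dsds = 8 -> 1 + 6/5 + 1 (nonzero remainder) = 3 IOCBs.
 */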
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
        srb_t *sp;
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = (DID_ERROR << 16);
        int req_sg_cnt, rsp_sg_cnt;
        uint16_t loop_id;
        struct fc_port *fcport;
        char *type = "FC_BSG_HST_CT";

        req_sg_cnt =
            dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!req_sg_cnt) {
                ql_log(ql_log_warn, vha, 0x700f,
                    "dma_map_sg return %d for request\n", req_sg_cnt);
                rval = -ENOMEM;
                goto done;
        }

        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!rsp_sg_cnt) {
                ql_log(ql_log_warn, vha, 0x7010,
                    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
                rval = -ENOMEM;
                goto done;
        }

        if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
            (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
                ql_log(ql_log_warn, vha, 0x7011,
                    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
                    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
                    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
                rval = -EAGAIN;
                goto done_unmap_sg;
        }

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x7012,
                    "Host is not online.\n");
                rval = -EIO;
                goto done_unmap_sg;
        }

        loop_id =
            (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
            >> 24;
        switch (loop_id) {
        case 0xFC:
                loop_id = cpu_to_le16(NPH_SNS);
                break;
        case 0xFA:
                loop_id = vha->mgmt_svr_loop_id;
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x7013,
                    "Unknown loop id: %x.\n", loop_id);
                rval = -EINVAL;
                goto done_unmap_sg;
        }

        /* Allocate a dummy fcport structure, since the functions preparing
         * the IOCB and mailbox command retrieve port specific information
         * from the fcport structure. For host based commands there is no
         * fcport structure allocated.
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
                ql_log(ql_log_warn, vha, 0x7014,
                    "Failed to allocate fcport.\n");
                rval = -ENOMEM;
                goto done_unmap_sg;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
        fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
        fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
        fcport->loop_id = loop_id;

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp) {
                ql_log(ql_log_warn, vha, 0x7015,
                    "qla2x00_get_sp failed.\n");
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        sp->type = SRB_CT_CMD;
        sp->name = "bsg_ct";
        sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->done = qla2x00_bsg_job_done;

        ql_dbg(ql_dbg_user, vha, 0x7016,
            "bsg rqst type: %s ct type: %x - "
            "loop-id=%x portid=%02x%02x%02x.\n", type,
            (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
            fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x7017,
                    "qla2x00_start_sp failed=%d.\n", rval);
                qla2x00_rel_sp(sp);
                rval = -EIO;
                goto done_free_fcport;
        }
        return rval;

done_free_fcport:
        kfree(fcport);
done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
        return rval;
}
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
        int ret = 0;
        int rval = 0;
        uint16_t new_config[4];
        struct qla_hw_data *ha = vha->hw;

        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
                goto done_reset_internal;

        memset(new_config, 0, sizeof(new_config));
        if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
            ENABLE_INTERNAL_LOOPBACK ||
            (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
            ENABLE_EXTERNAL_LOOPBACK) {
                new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
                ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
                    (new_config[0] & INTERNAL_LOOPBACK_MASK));
                memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

                ha->notify_dcbx_comp = wait;
                ha->notify_lb_portup_comp = wait2;

                ret = qla81xx_set_port_config(vha, new_config);
                if (ret != QLA_SUCCESS) {
                        ql_log(ql_log_warn, vha, 0x7025,
                            "Set port config failed.\n");
                        ha->notify_dcbx_comp = 0;
                        ha->notify_lb_portup_comp = 0;
                        rval = -EINVAL;
                        goto done_reset_internal;
                }

                /* Wait for DCBX complete event */
                if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
                    (DCBX_COMP_TIMEOUT * HZ))) {
                        ql_dbg(ql_dbg_user, vha, 0x7026,
                            "DCBX completion not received.\n");
                        ha->notify_dcbx_comp = 0;
                        ha->notify_lb_portup_comp = 0;
                        rval = -EINVAL;
                        goto done_reset_internal;
                } else
                        ql_dbg(ql_dbg_user, vha, 0x7027,
                            "DCBX completion received.\n");

                if (wait2 &&
                    !wait_for_completion_timeout(&ha->lb_portup_comp,
                    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
                        ql_dbg(ql_dbg_user, vha, 0x70c5,
                            "Port up completion not received.\n");
                        ha->notify_lb_portup_comp = 0;
                        rval = -EINVAL;
                        goto done_reset_internal;
                } else
                        ql_dbg(ql_dbg_user, vha, 0x70c6,
                            "Port up completion received.\n");

                ha->notify_dcbx_comp = 0;
                ha->notify_lb_portup_comp = 0;
        }
done_reset_internal:
        return rval;
}
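/*
 * The wait/wait2 flags above gate two separate firmware notifications:
 * wait blocks on the DCBX-complete event and wait2 on the loopback
 * port-up event, each bounded by its own timeout.
 */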
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
        int ret = 0;
        int rval = 0;
        unsigned long rem_tmo = 0, current_tmo = 0;
        struct qla_hw_data *ha = vha->hw;

        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
                goto done_set_internal;

        if (mode == INTERNAL_LOOPBACK)
                new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
        else if (mode == EXTERNAL_LOOPBACK)
                new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
        ql_dbg(ql_dbg_user, vha, 0x70be,
            "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

        memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

        ha->notify_dcbx_comp = 1;
        ret = qla81xx_set_port_config(vha, new_config);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x7021,
                    "set port config failed.\n");
                ha->notify_dcbx_comp = 0;
                rval = -EINVAL;
                goto done_set_internal;
        }

        /* Wait for DCBX complete event */
        current_tmo = DCBX_COMP_TIMEOUT * HZ;
        while (1) {
                rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
                    current_tmo);
                if (!ha->idc_extend_tmo || rem_tmo) {
                        ha->idc_extend_tmo = 0;
                        break;
                }
                current_tmo = ha->idc_extend_tmo * HZ;
                ha->idc_extend_tmo = 0;
        }

        if (!rem_tmo) {
                ql_dbg(ql_dbg_user, vha, 0x7022,
                    "DCBX completion not received.\n");
                ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
                /*
                 * If the reset of the loopback mode doesn't work take a FCoE
                 * dump and reset the chip.
                 */
                if (ret) {
                        ha->isp_ops->fw_dump(vha, 0);
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                }
                rval = -EINVAL;
        } else {
                if (ha->flags.idc_compl_status) {
                        ql_dbg(ql_dbg_user, vha, 0x70c3,
                            "Bad status in IDC Completion AEN\n");
                        rval = -EINVAL;
                        ha->flags.idc_compl_status = 0;
                } else
                        ql_dbg(ql_dbg_user, vha, 0x7023,
                            "DCBX completion received.\n");
        }

        ha->notify_dcbx_comp = 0;
        ha->idc_extend_tmo = 0;

done_set_internal:
        return rval;
}
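/*
 * The retry loop above honors firmware requests to extend the IDC
 * timeout: as long as ha->idc_extend_tmo is set and the completion has
 * not fired, the wait is restarted with the extended interval instead
 * of being treated as a failure.
 */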
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval;
        uint8_t command_sent;
        char *type;
        struct msg_echo_lb elreq;
        uint16_t response[MAILBOX_REGISTER_COUNT];
        uint16_t config[4], new_config[4];
        uint8_t *fw_sts_ptr;
        uint8_t *req_data = NULL;
        dma_addr_t req_data_dma;
        uint32_t req_data_len;
        uint8_t *rsp_data = NULL;
        dma_addr_t rsp_data_dma;
        uint32_t rsp_data_len;

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
                return -EIO;
        }

        memset(&elreq, 0, sizeof(elreq));

        elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
            DMA_TO_DEVICE);

        if (!elreq.req_sg_cnt) {
                ql_log(ql_log_warn, vha, 0x701a,
                    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
                return -ENOMEM;
        }

        elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
            DMA_FROM_DEVICE);

        if (!elreq.rsp_sg_cnt) {
                ql_log(ql_log_warn, vha, 0x701b,
                    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
                rval = -ENOMEM;
                goto done_unmap_req_sg;
        }

        if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
            (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
                ql_log(ql_log_warn, vha, 0x701c,
                    "dma mapping resulted in different sg counts, "
                    "request_sg_cnt: %x dma_request_sg_cnt: %x "
                    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
                    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
                    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
                rval = -EAGAIN;
                goto done_unmap_sg;
        }

        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
        req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
            &req_data_dma, GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x701d,
                    "dma alloc failed for req_data.\n");
                rval = -ENOMEM;
                goto done_unmap_sg;
        }

        rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
            &rsp_data_dma, GFP_KERNEL);
        if (!rsp_data) {
                ql_log(ql_log_warn, vha, 0x7004,
                    "dma alloc failed for rsp_data.\n");
                rval = -ENOMEM;
                goto done_free_dma_req;
        }

        /* Copy the request buffer in req_data now */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data, req_data_len);

        elreq.send_dma = req_data_dma;
        elreq.rcv_dma = rsp_data_dma;
        elreq.transfer_size = req_data_len;

        elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
        elreq.iteration_count =
            bsg_request->rqst_data.h_vendor.vendor_cmd[2];

        if (atomic_read(&vha->loop_state) == LOOP_READY &&
            (ha->current_topology == ISP_CFG_F ||
            (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
             req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
            elreq.options == EXTERNAL_LOOPBACK) {
                type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
                ql_dbg(ql_dbg_user, vha, 0x701e,
                    "BSG request type: %s.\n", type);
                command_sent = INT_DEF_LB_ECHO_CMD;
                rval = qla2x00_echo_test(vha, &elreq, response);
        } else {
                if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
                        memset(config, 0, sizeof(config));
                        memset(new_config, 0, sizeof(new_config));

                        if (qla81xx_get_port_config(vha, config)) {
                                ql_log(ql_log_warn, vha, 0x701f,
                                    "Get port config failed.\n");
                                rval = -EPERM;
                                goto done_free_dma_rsp;
                        }

                        if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
                                ql_dbg(ql_dbg_user, vha, 0x70c4,
                                    "Loopback operation already in "
                                    "progress.\n");
                                rval = -EAGAIN;
                                goto done_free_dma_rsp;
                        }

                        ql_dbg(ql_dbg_user, vha, 0x70c0,
                            "elreq.options=%04x\n", elreq.options);

                        if (elreq.options == EXTERNAL_LOOPBACK)
                                if (IS_QLA8031(ha) || IS_QLA8044(ha))
                                        rval = qla81xx_set_loopback_mode(vha,
                                            config, new_config, elreq.options);
                                else
                                        rval = qla81xx_reset_loopback_mode(vha,
                                            config, 1, 0);
                        else
                                rval = qla81xx_set_loopback_mode(vha, config,
                                    new_config, elreq.options);

                        if (rval) {
                                rval = -EPERM;
                                goto done_free_dma_rsp;
                        }

                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
                        ql_dbg(ql_dbg_user, vha, 0x7028,
                            "BSG request type: %s.\n", type);

                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
                        rval = qla2x00_loopback_test(vha, &elreq, response);

                        if (response[0] == MBS_COMMAND_ERROR &&
                            response[1] == MBS_LB_RESET) {
                                ql_log(ql_log_warn, vha, 0x7029,
                                    "MBX command error, Aborting ISP.\n");
                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                                qla2xxx_wake_dpc(vha);
                                qla2x00_wait_for_chip_reset(vha);
                                /* Also reset the MPI */
                                if (IS_QLA81XX(ha)) {
                                        if (qla81xx_restart_mpi_firmware(vha) !=
                                            QLA_SUCCESS) {
                                                ql_log(ql_log_warn, vha, 0x702a,
                                                    "MPI reset failed.\n");
                                        }
                                }

                                rval = -EIO;
                                goto done_free_dma_rsp;
                        }

                        if (new_config[0]) {
                                int ret;

                                /* Revert back to original port config
                                 * Also clear internal loopback
                                 */
                                ret = qla81xx_reset_loopback_mode(vha,
                                    new_config, 0, 1);
                                if (ret) {
                                        /*
                                         * If the reset of the loopback mode
                                         * doesn't work take FCoE dump and then
                                         * reset the chip.
                                         */
                                        ha->isp_ops->fw_dump(vha, 0);
                                        set_bit(ISP_ABORT_NEEDED,
                                            &vha->dpc_flags);
                                }
                        }

                } else {
                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
                        ql_dbg(ql_dbg_user, vha, 0x702b,
                            "BSG request type: %s.\n", type);
                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
                        rval = qla2x00_loopback_test(vha, &elreq, response);
                }
        }

        if (rval) {
                ql_log(ql_log_warn, vha, 0x702c,
                    "Vendor request %s failed.\n", type);

                rval = 0;
                bsg_reply->result = (DID_ERROR << 16);
                bsg_reply->reply_payload_rcv_len = 0;
        } else {
                ql_dbg(ql_dbg_user, vha, 0x702d,
                    "Vendor request %s completed.\n", type);
                bsg_reply->result = (DID_OK << 16);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, rsp_data,
                    rsp_data_len);
        }

        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
            sizeof(response) + sizeof(uint8_t);
        fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
        memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
            sizeof(response));
        fw_sts_ptr += sizeof(response);
        *fw_sts_ptr = command_sent;

done_free_dma_rsp:
        dma_free_coherent(&ha->pdev->dev, rsp_data_len,
            rsp_data, rsp_data_dma);
done_free_dma_req:
        dma_free_coherent(&ha->pdev->dev, req_data_len,
            req_data, req_data_dma);
done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!rval)
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        return rval;
}
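/*
 * Reply layout used above: the fc_bsg_reply structure is followed by the
 * raw mailbox response array and then a single byte recording which
 * diagnostic command (echo or loopback) was actually issued, which is why
 * reply_len is sizeof(struct fc_bsg_reply) + sizeof(response) +
 * sizeof(uint8_t).
 */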
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint32_t flag;

        if (!IS_QLA84XX(ha)) {
                ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
                return -EINVAL;
        }

        flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

        if (rval) {
                ql_log(ql_log_warn, vha, 0x7030,
                    "Vendor request 84xx reset failed.\n");
                rval = (DID_ERROR << 16);

        } else {
                ql_dbg(ql_dbg_user, vha, 0x7031,
                    "Vendor request 84xx reset completed.\n");
                bsg_reply->result = DID_OK;
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        }

        return rval;
}
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct verify_chip_entry_84xx *mn = NULL;
        dma_addr_t mn_dma, fw_dma;
        void *fw_buf = NULL;
        int rval = 0;
        uint32_t sg_cnt;
        uint32_t data_len;
        uint16_t options;
        uint32_t flag;
        uint32_t fw_ver;

        if (!IS_QLA84XX(ha)) {
                ql_dbg(ql_dbg_user, vha, 0x7032,
                    "Not 84xx, exiting.\n");
                return -EINVAL;
        }

        sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!sg_cnt) {
                ql_log(ql_log_warn, vha, 0x7033,
                    "dma_map_sg returned %d for request.\n", sg_cnt);
                return -ENOMEM;
        }

        if (sg_cnt != bsg_job->request_payload.sg_cnt) {
                ql_log(ql_log_warn, vha, 0x7034,
                    "DMA mapping resulted in different sg counts, "
                    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
                    bsg_job->request_payload.sg_cnt, sg_cnt);
                rval = -EAGAIN;
                goto done_unmap_sg;
        }

        data_len = bsg_job->request_payload.payload_len;
        fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &fw_dma, GFP_KERNEL);
        if (!fw_buf) {
                ql_log(ql_log_warn, vha, 0x7035,
                    "DMA alloc failed for fw_buf.\n");
                rval = -ENOMEM;
                goto done_unmap_sg;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, fw_buf, data_len);

        mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
        if (!mn) {
                ql_log(ql_log_warn, vha, 0x7036,
                    "DMA alloc failed for fw buffer.\n");
                rval = -ENOMEM;
                goto done_free_fw_buf;
        }

        flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
        fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

        mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
        mn->entry_count = 1;

        options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
        if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
                options |= VCO_DIAG_FW;

        mn->options = cpu_to_le16(options);
        mn->fw_ver = cpu_to_le32(fw_ver);
        mn->fw_size = cpu_to_le32(data_len);
        mn->fw_seq_size = cpu_to_le32(data_len);
        mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
        mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
        mn->dseg_length = cpu_to_le32(data_len);
        mn->data_seg_cnt = cpu_to_le16(1);

        rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

        if (rval) {
                ql_log(ql_log_warn, vha, 0x7037,
                    "Vendor request 84xx updatefw failed.\n");

                rval = (DID_ERROR << 16);
        } else {
                ql_dbg(ql_dbg_user, vha, 0x7038,
                    "Vendor request 84xx updatefw completed.\n");

                bsg_job->reply_len = sizeof(struct fc_bsg_reply);
                bsg_reply->result = DID_OK;
        }

        dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
        dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (!rval)
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        return rval;
}
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct access_chip_84xx *mn = NULL;
        dma_addr_t mn_dma, mgmt_dma;
        void *mgmt_b = NULL;
        int rval = 0;
        struct qla_bsg_a84_mgmt *ql84_mgmt;
        uint32_t sg_cnt;
        uint32_t data_len = 0;
        uint32_t dma_direction = DMA_NONE;

        if (!IS_QLA84XX(ha)) {
                ql_log(ql_log_warn, vha, 0x703a,
                    "Not 84xx, exiting.\n");
                return -EINVAL;
        }

        mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
        if (!mn) {
                ql_log(ql_log_warn, vha, 0x703c,
                    "DMA alloc failed for fw buffer.\n");
                return -ENOMEM;
        }

        mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
        mn->entry_count = 1;
        ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
        switch (ql84_mgmt->mgmt.cmd) {
        case QLA84_MGMT_READ_MEM:
        case QLA84_MGMT_GET_INFO:
                sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
                if (!sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x703d,
                            "dma_map_sg returned %d for reply.\n", sg_cnt);
                        rval = -ENOMEM;
                        goto exit_mgmt;
                }

                dma_direction = DMA_FROM_DEVICE;

                if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x703e,
                            "DMA mapping resulted in different sg counts, "
                            "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
                            bsg_job->reply_payload.sg_cnt, sg_cnt);
                        rval = -EAGAIN;
                        goto done_unmap_sg;
                }

                data_len = bsg_job->reply_payload.payload_len;

                mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
                    &mgmt_dma, GFP_KERNEL);
                if (!mgmt_b) {
                        ql_log(ql_log_warn, vha, 0x703f,
                            "DMA alloc failed for mgmt_b.\n");
                        rval = -ENOMEM;
                        goto done_unmap_sg;
                }

                if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
                        mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
                        mn->parameter1 =
                            cpu_to_le32(
                            ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

                } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
                        mn->options = cpu_to_le16(ACO_REQUEST_INFO);
                        mn->parameter1 =
                            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

                        mn->parameter2 =
                            cpu_to_le32(
                            ql84_mgmt->mgmt.mgmtp.u.info.context);
                }
                break;

        case QLA84_MGMT_WRITE_MEM:
                sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

                if (!sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x7040,
                            "dma_map_sg returned %d.\n", sg_cnt);
                        rval = -ENOMEM;
                        goto exit_mgmt;
                }

                dma_direction = DMA_TO_DEVICE;

                if (sg_cnt != bsg_job->request_payload.sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x7041,
                            "DMA mapping resulted in different sg counts, "
                            "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
                            bsg_job->request_payload.sg_cnt, sg_cnt);
                        rval = -EAGAIN;
                        goto done_unmap_sg;
                }

                data_len = bsg_job->request_payload.payload_len;
                mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
                    &mgmt_dma, GFP_KERNEL);
                if (!mgmt_b) {
                        ql_log(ql_log_warn, vha, 0x7042,
                            "DMA alloc failed for mgmt_b.\n");
                        rval = -ENOMEM;
                        goto done_unmap_sg;
                }

                sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

                mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
                mn->parameter1 =
                    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
                break;

        case QLA84_MGMT_CHNG_CONFIG:
                mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
                mn->parameter1 =
                    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

                mn->parameter2 =
                    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

                mn->parameter3 =
                    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
                break;

        default:
                rval = -EIO;
                goto exit_mgmt;
        }

        if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
                mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
                mn->dseg_count = cpu_to_le16(1);
                mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
                mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
                mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
        }

        rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

        if (rval) {
                ql_log(ql_log_warn, vha, 0x7043,
                    "Vendor request 84xx mgmt failed.\n");

                rval = (DID_ERROR << 16);

        } else {
                ql_dbg(ql_dbg_user, vha, 0x7044,
                    "Vendor request 84xx mgmt completed.\n");

                bsg_job->reply_len = sizeof(struct fc_bsg_reply);
                bsg_reply->result = DID_OK;

                if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
                    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
                        bsg_reply->reply_payload_rcv_len =
                            bsg_job->reply_payload.payload_len;

                        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                            bsg_job->reply_payload.sg_cnt, mgmt_b,
                            data_len);
                }
        }

done_unmap_sg:
        if (mgmt_b)
                dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

        if (dma_direction == DMA_TO_DEVICE)
                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        else if (dma_direction == DMA_FROM_DEVICE)
                dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
        dma_pool_free(ha->s_dma_pool, mn, mn_dma);

        if (!rval)
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        return rval;
}
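/*
 * dma_direction starts as DMA_NONE and is only set once a scatter list has
 * been mapped, so the shared cleanup above can tell which payload (if any)
 * needs dma_unmap_sg() regardless of where the function bailed out.
 */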
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_port_param *port_param = NULL;
        fc_port_t *fcport = NULL;
        int found = 0;
        uint16_t mb[MAILBOX_REGISTER_COUNT];
        uint8_t *rsp_ptr = NULL;

        if (!IS_IIDMA_CAPABLE(vha->hw)) {
                ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
                return -EINVAL;
        }

        port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
        if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
                ql_log(ql_log_warn, vha, 0x7048,
                    "Invalid destination type.\n");
                return -EINVAL;
        }

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                if (fcport->port_type != FCT_TARGET)
                        continue;

                if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
                    fcport->port_name, sizeof(fcport->port_name)))
                        continue;

                found = 1;
                break;
        }

        if (!found) {
                ql_log(ql_log_warn, vha, 0x7049,
                    "Failed to find port.\n");
                return -EINVAL;
        }

        if (atomic_read(&fcport->state) != FCS_ONLINE) {
                ql_log(ql_log_warn, vha, 0x704a,
                    "Port is not online.\n");
                return -EINVAL;
        }

        if (fcport->flags & FCF_LOGIN_NEEDED) {
                ql_log(ql_log_warn, vha, 0x704b,
                    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
                return -EINVAL;
        }

        if (port_param->mode)
                rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
                    port_param->speed, mb);
        else
                rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
                    &port_param->speed, mb);

        if (rval) {
                ql_log(ql_log_warn, vha, 0x704c,
                    "iIDMA cmd failed for %8phN -- "
                    "%04x %x %04x %04x.\n", fcport->port_name,
                    rval, fcport->fp_speed, mb[0], mb[1]);
                rval = (DID_ERROR << 16);
        } else {
                if (!port_param->mode) {
                        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
                            sizeof(struct qla_port_param);

                        rsp_ptr = ((uint8_t *)bsg_reply) +
                            sizeof(struct fc_bsg_reply);

                        memcpy(rsp_ptr, port_param,
                            sizeof(struct qla_port_param));
                }

                bsg_reply->result = DID_OK;
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        }

        return rval;
}
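/*
 * port_param->mode selects the direction of the iIDMA operation: nonzero
 * sets the port speed via qla2x00_set_idma_speed(), zero reads it back, in
 * which case the qla_port_param block is appended to the bsg reply.
 */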
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        uint32_t start = 0;
        int valid = 0;
        struct qla_hw_data *ha = vha->hw;

        if (unlikely(pci_channel_offline(ha->pdev)))
                return -EINVAL;

        start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
        if (start > ha->optrom_size) {
                ql_log(ql_log_warn, vha, 0x7055,
                    "start %d > optrom_size %d.\n", start, ha->optrom_size);
                return -EINVAL;
        }

        if (ha->optrom_state != QLA_SWAITING) {
                ql_log(ql_log_info, vha, 0x7056,
                    "optrom_state %d.\n", ha->optrom_state);
                return -EBUSY;
        }

        ha->optrom_region_start = start;
        ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
        if (is_update) {
                if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
                        valid = 1;
                else if (start == (ha->flt_region_boot * 4) ||
                    start == (ha->flt_region_fw * 4))
                        valid = 1;
                else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
                    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
                        valid = 1;
                if (!valid) {
                        ql_log(ql_log_warn, vha, 0x7058,
                            "Invalid start region 0x%x/0x%x.\n", start,
                            bsg_job->request_payload.payload_len);
                        return -EINVAL;
                }

                ha->optrom_region_size = start +
                    bsg_job->request_payload.payload_len > ha->optrom_size ?
                    ha->optrom_size - start :
                    bsg_job->request_payload.payload_len;
                ha->optrom_state = QLA_SWRITING;
        } else {
                ha->optrom_region_size = start +
                    bsg_job->reply_payload.payload_len > ha->optrom_size ?
                    ha->optrom_size - start :
                    bsg_job->reply_payload.payload_len;
                ha->optrom_state = QLA_SREADING;
        }

        ha->optrom_buffer = vzalloc(ha->optrom_region_size);
        if (!ha->optrom_buffer) {
                ql_log(ql_log_warn, vha, 0x7059,
                    "Read: Unable to allocate memory for optrom retrieval "
                    "(%x)\n", ha->optrom_region_size);

                ha->optrom_state = QLA_SWAITING;
                return -ENOMEM;
        }

        return 0;
}
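/*
 * Region sizing above clamps the transfer to the option ROM: the size is
 * the requested payload length, truncated to optrom_size - start when the
 * request would run past the end. For example, start = 0x20000 with a
 * 0x50000-byte payload against a 0x60000-byte optrom yields a 0x40000-byte
 * region.
 */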
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;

        if (ha->flags.nic_core_reset_hdlr_active)
                return -EBUSY;

        mutex_lock(&ha->optrom_mutex);
        rval = qla2x00_optrom_setup(bsg_job, vha, 0);
        if (rval) {
                mutex_unlock(&ha->optrom_mutex);
                return rval;
        }

        ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
            ha->optrom_region_start, ha->optrom_region_size);

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
            ha->optrom_region_size);

        bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
        bsg_reply->result = DID_OK;
        vfree(ha->optrom_buffer);
        ha->optrom_buffer = NULL;
        ha->optrom_state = QLA_SWAITING;
        mutex_unlock(&ha->optrom_mutex);
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return rval;
}
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;

        mutex_lock(&ha->optrom_mutex);
        rval = qla2x00_optrom_setup(bsg_job, vha, 1);
        if (rval) {
                mutex_unlock(&ha->optrom_mutex);
                return rval;
        }

        /* Set the isp82xx_no_md_cap not to capture minidump */
        ha->flags.isp82xx_no_md_cap = 1;

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
            ha->optrom_region_size);

        ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
            ha->optrom_region_start, ha->optrom_region_size);

        bsg_reply->result = DID_OK;
        vfree(ha->optrom_buffer);
        ha->optrom_buffer = NULL;
        ha->optrom_state = QLA_SWAITING;
        mutex_unlock(&ha->optrom_mutex);
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return rval;
}
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_image_version_list *list = (void *)bsg;
        struct qla_image_version *image;
        uint32_t count;
        dma_addr_t sfp_dma;
        void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

        image = list->version;
        count = list->count;
        while (count--) {
                memcpy(sfp, &image->field_info, sizeof(image->field_info));
                rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
                    image->field_address.device, image->field_address.offset,
                    sizeof(image->field_info), image->field_address.option);
                if (rval) {
                        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                            EXT_STATUS_MAILBOX;
                        goto dealloc;
                }
                image++;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_status_reg *sr = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
            sr->field_address.device, sr->field_address.offset,
            sizeof(sr->status_reg), sr->field_address.option);
        sr->status_reg = *sfp;

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len = sizeof(*sr);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_status_reg *sr = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

        *sfp = sr->status_reg;
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            sr->field_address.device, sr->field_address.offset,
            sizeof(sr->status_reg), sr->field_address.option);

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_i2c_access *i2c = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

        memcpy(sfp, i2c->buffer, i2c->length);
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            i2c->device, i2c->offset, i2c->length, i2c->option);

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_i2c_access *i2c = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
            i2c->device, i2c->offset, i2c->length, i2c->option);

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        memcpy(i2c->buffer, sfp, i2c->length);
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}
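/*
 * The FRU and i2c helpers above share one pattern: a small on-stack bsg[]
 * staging buffer overlaid with the vendor structure, a DMA-able scratch
 * buffer from ha->s_dma_pool for the SFP read/write mailbox commands, and
 * an EXT_STATUS_* code in vendor_rsp[0] that carries the real outcome even
 * though the bsg result itself is reported as DID_OK.
 */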
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        uint32_t rval = EXT_STATUS_OK;
        uint16_t req_sg_cnt = 0;
        uint16_t rsp_sg_cnt = 0;
        uint16_t nextlid = 0;
        uint32_t tot_dsds;
        srb_t *sp = NULL;
        uint32_t req_data_len;
        uint32_t rsp_data_len;

        /* Check the type of the adapter */
        if (!IS_BIDI_CAPABLE(ha)) {
                ql_log(ql_log_warn, vha, 0x70a0,
                    "This adapter is not supported\n");
                rval = EXT_STATUS_NOT_SUPPORTED;
                goto done;
        }

        if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
            test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
            test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
                rval = EXT_STATUS_BUSY;
                goto done;
        }

        /* Check if host is online */
        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x70a1,
                    "Host is not online\n");
                rval = EXT_STATUS_DEVICE_OFFLINE;
                goto done;
        }

        /* Check if cable is plugged in or not */
        if (vha->device_flags & DFLG_NO_CABLE) {
                ql_log(ql_log_warn, vha, 0x70a2,
                    "Cable is unplugged...\n");
                rval = EXT_STATUS_INVALID_CFG;
                goto done;
        }

        /* Check if the switch is connected or not */
        if (ha->current_topology != ISP_CFG_F) {
                ql_log(ql_log_warn, vha, 0x70a3,
                    "Host is not connected to the switch\n");
                rval = EXT_STATUS_INVALID_CFG;
                goto done;
        }

        /* Check if operating mode is P2P */
        if (ha->operating_mode != P2P) {
                ql_log(ql_log_warn, vha, 0x70a4,
                    "Host operating mode is not P2P\n");
                rval = EXT_STATUS_INVALID_CFG;
                goto done;
        }

        mutex_lock(&ha->selflogin_lock);
        if (vha->self_login_loop_id == 0) {
                /* Initialize all required fields of fcport */
                vha->bidir_fcport.vha = vha;
                vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
                vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
                vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
                vha->bidir_fcport.loop_id = vha->loop_id;

                if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
                        ql_log(ql_log_warn, vha, 0x70a7,
                            "Failed to login port %06X for bidirectional IOCB\n",
                            vha->bidir_fcport.d_id.b24);
                        mutex_unlock(&ha->selflogin_lock);
                        rval = EXT_STATUS_MAILBOX;
                        goto done;
                }
                vha->self_login_loop_id = nextlid - 1;
        }

        /* Assign the self login loop id to fcport */
        mutex_unlock(&ha->selflogin_lock);

        vha->bidir_fcport.loop_id = vha->self_login_loop_id;

        req_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt,
            DMA_TO_DEVICE);

        if (!req_sg_cnt) {
                rval = EXT_STATUS_NO_MEMORY;
                goto done;
        }

        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
            DMA_FROM_DEVICE);

        if (!rsp_sg_cnt) {
                rval = EXT_STATUS_NO_MEMORY;
                goto done_unmap_req_sg;
        }

        if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
            (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
                ql_dbg(ql_dbg_user, vha, 0x70a9,
                    "Dma mapping resulted in different sg counts "
                    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
                    "%x dma_reply_sg_cnt: %x]\n",
                    bsg_job->request_payload.sg_cnt, req_sg_cnt,
                    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
                rval = EXT_STATUS_NO_MEMORY;
                goto done_unmap_sg;
        }

        req_data_len = bsg_job->request_payload.payload_len;
        rsp_data_len = bsg_job->reply_payload.payload_len;

        if (req_data_len != rsp_data_len) {
                rval = EXT_STATUS_BUSY;
                ql_log(ql_log_warn, vha, 0x70aa,
                    "req_data_len != rsp_data_len\n");
                goto done_unmap_sg;
        }

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
        if (!sp) {
                ql_dbg(ql_dbg_user, vha, 0x70ac,
                    "Alloc SRB structure failed\n");
                rval = EXT_STATUS_NO_MEMORY;
                goto done_unmap_sg;
        }

        /* Populate srb->ctx with bidir ctx */
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->type = SRB_BIDI_CMD;
        sp->done = qla2x00_bsg_job_done;

        /* Add the read and write sg count */
        tot_dsds = rsp_sg_cnt + req_sg_cnt;

        rval = qla2x00_start_bidir(sp, vha, tot_dsds);
        if (rval != EXT_STATUS_OK)
                goto done_free_srb;
        /* the bsg request will be completed in the interrupt handler */
        return rval;

done_free_srb:
        mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

        /* Return an error vendor specific response
         * and complete the bsg request
         */
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len = 0;
        bsg_reply->result = (DID_OK) << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        /* Always return success, vendor rsp carries correct status */
        return 0;
}
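/*
 * As the comments above note, the bidirectional path always completes the
 * bsg job with DID_OK and returns 0 on failure; the real outcome travels
 * in vendor_rsp[0] as an EXT_STATUS_* code, and only a successful
 * qla2x00_start_bidir() defers completion to the interrupt handler.
 */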
1947 static int
1948 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1950 struct fc_bsg_request *bsg_request = bsg_job->request;
1951 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1952 scsi_qla_host_t *vha = shost_priv(host);
1953 struct qla_hw_data *ha = vha->hw;
1954 int rval = (DID_ERROR << 16);
1955 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1956 srb_t *sp;
1957 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1958 struct fc_port *fcport;
1959 char *type = "FC_BSG_HST_FX_MGMT";
1961 /* Copy the IOCB specific information */
1962 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1963 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1965 /* Dump the vendor information */
1966 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
1967 (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
1969 if (!vha->flags.online) {
1970 ql_log(ql_log_warn, vha, 0x70d0,
1971 "Host is not online.\n");
1972 rval = -EIO;
1973 goto done;
1976 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1977 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1978 bsg_job->request_payload.sg_list,
1979 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1980 if (!req_sg_cnt) {
1981 ql_log(ql_log_warn, vha, 0x70c7,
1982 "dma_map_sg return %d for request\n", req_sg_cnt);
1983 rval = -ENOMEM;
1984 goto done;
1985 }
1986 }
1988 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
1989 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1990 bsg_job->reply_payload.sg_list,
1991 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1992 if (!rsp_sg_cnt) {
1993 ql_log(ql_log_warn, vha, 0x70c8,
1994 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
1995 rval = -ENOMEM;
1996 goto done_unmap_req_sg;
1997 }
1998 }
2000 ql_dbg(ql_dbg_user, vha, 0x70c9,
2001 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2002 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2003 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2005 /* Allocate a dummy fcport structure, since the functions preparing
2006 * the IOCB and mailbox command retrieve port-specific information
2007 * from the fcport structure. For host-based ELS commands there is
2008 * no fcport structure allocated.
2009 */
2010 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2011 if (!fcport) {
2012 ql_log(ql_log_warn, vha, 0x70ca,
2013 "Failed to allocate fcport.\n");
2014 rval = -ENOMEM;
2015 goto done_unmap_rsp_sg;
2016 }
2018 /* Alloc SRB structure */
2019 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2020 if (!sp) {
2021 ql_log(ql_log_warn, vha, 0x70cb,
2022 "qla2x00_get_sp failed.\n");
2023 rval = -ENOMEM;
2024 goto done_free_fcport;
2025 }
2027 /* Initialize all required fields of fcport */
2028 fcport->vha = vha;
2029 fcport->loop_id = piocb_rqst->dataword;
2031 sp->type = SRB_FXIOCB_BCMD;
2032 sp->name = "bsg_fx_mgmt";
2033 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2034 sp->u.bsg_job = bsg_job;
2035 sp->free = qla2x00_bsg_sp_free;
2036 sp->done = qla2x00_bsg_job_done;
2038 ql_dbg(ql_dbg_user, vha, 0x70cc,
2039 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2040 type, piocb_rqst->func_type, fcport->loop_id);
2042 rval = qla2x00_start_sp(sp);
2043 if (rval != QLA_SUCCESS) {
2044 ql_log(ql_log_warn, vha, 0x70cd,
2045 "qla2x00_start_sp failed=%d.\n", rval);
2046 mempool_free(sp, ha->srb_mempool);
2047 rval = -EIO;
2048 goto done_free_fcport;
2049 }
2050 return rval;
2052 done_free_fcport:
2053 kfree(fcport);
2055 done_unmap_rsp_sg:
2056 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2057 dma_unmap_sg(&ha->pdev->dev,
2058 bsg_job->reply_payload.sg_list,
2059 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2060 done_unmap_req_sg:
2061 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2062 dma_unmap_sg(&ha->pdev->dev,
2063 bsg_job->request_payload.sg_list,
2064 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2066 done:
2067 return rval;
2068 }
2070 static int
2071 qla26xx_serdes_op(struct bsg_job *bsg_job)
2072 {
2073 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2074 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2075 scsi_qla_host_t *vha = shost_priv(host);
2076 int rval = 0;
2077 struct qla_serdes_reg sr;
2079 memset(&sr, 0, sizeof(sr));
2081 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2082 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2084 switch (sr.cmd) {
2085 case INT_SC_SERDES_WRITE_REG:
2086 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2087 bsg_reply->reply_payload_rcv_len = 0;
2088 break;
2089 case INT_SC_SERDES_READ_REG:
2090 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2091 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2092 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2093 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2094 break;
2095 default:
2096 ql_dbg(ql_dbg_user, vha, 0x708c,
2097 "Unknown serdes cmd %x.\n", sr.cmd);
2098 rval = -EINVAL;
2099 break;
2100 }
2102 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2103 rval ? EXT_STATUS_MAILBOX : 0;
2105 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2106 bsg_reply->result = DID_OK << 16;
2107 bsg_job_done(bsg_job, bsg_reply->result,
2108 bsg_reply->reply_payload_rcv_len);
2109 return 0;
2110 }
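/*
 * Illustrative sketch only (not driver code): a user-space caller is
 * expected to place a struct qla_serdes_reg in the BSG request payload,
 * e.g.
 *
 *	struct qla_serdes_reg sr = {
 *		.cmd  = INT_SC_SERDES_READ_REG,
 *		.addr = <register address>,
 *	};
 *
 * and, for reads, copy the updated structure back out of the reply
 * payload, where sr.val then holds the register contents.
 */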
2112 static int
2113 qla8044_serdes_op(struct bsg_job *bsg_job)
2114 {
2115 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2116 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2117 scsi_qla_host_t *vha = shost_priv(host);
2118 int rval = 0;
2119 struct qla_serdes_reg_ex sr;
2121 memset(&sr, 0, sizeof(sr));
2123 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2124 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2126 switch (sr.cmd) {
2127 case INT_SC_SERDES_WRITE_REG:
2128 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2129 bsg_reply->reply_payload_rcv_len = 0;
2130 break;
2131 case INT_SC_SERDES_READ_REG:
2132 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2133 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2134 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2135 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2136 break;
2137 default:
2138 ql_dbg(ql_dbg_user, vha, 0x7020,
2139 "Unknown serdes cmd %x.\n", sr.cmd);
2140 rval = -EINVAL;
2141 break;
2142 }
2144 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2145 rval ? EXT_STATUS_MAILBOX : 0;
2147 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2148 bsg_reply->result = DID_OK << 16;
2149 bsg_job_done(bsg_job, bsg_reply->result,
2150 bsg_reply->reply_payload_rcv_len);
2151 return 0;
2152 }
2154 static int
2155 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2156 {
2157 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2158 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2159 scsi_qla_host_t *vha = shost_priv(host);
2160 struct qla_hw_data *ha = vha->hw;
2161 struct qla_flash_update_caps cap;
2163 if (!(IS_QLA27XX(ha)))
2164 return -EPERM;
2166 memset(&cap, 0, sizeof(cap));
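/* Pack the firmware attribute words into a single 64-bit capability
 * value: fw_attributes_ext[1] in bits 63:48, fw_attributes_ext[0] in
 * bits 47:32, fw_attributes_h in bits 31:16 and fw_attributes in
 * bits 15:0.
 */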
2167 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2168 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2169 (uint64_t)ha->fw_attributes_h << 16 |
2170 (uint64_t)ha->fw_attributes;
2172 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2173 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2174 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2176 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2177 EXT_STATUS_OK;
2179 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2180 bsg_reply->result = DID_OK << 16;
2181 bsg_job_done(bsg_job, bsg_reply->result,
2182 bsg_reply->reply_payload_rcv_len);
2183 return 0;
2184 }
2186 static int
2187 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2188 {
2189 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2190 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2191 scsi_qla_host_t *vha = shost_priv(host);
2192 struct qla_hw_data *ha = vha->hw;
2193 uint64_t online_fw_attr = 0;
2194 struct qla_flash_update_caps cap;
2196 if (!(IS_QLA27XX(ha)))
2197 return -EPERM;
2199 memset(&cap, 0, sizeof(cap));
2200 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2201 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2203 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2204 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2205 (uint64_t)ha->fw_attributes_h << 16 |
2206 (uint64_t)ha->fw_attributes;
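/* Only a capability word that matches what the running firmware
 * reports is accepted; anything else is rejected as an invalid
 * parameter.
 */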
2208 if (online_fw_attr != cap.capabilities) {
2209 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2210 EXT_STATUS_INVALID_PARAM;
2211 return -EINVAL;
2212 }
2214 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2215 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2216 EXT_STATUS_INVALID_PARAM;
2217 return -EINVAL;
2218 }
2220 bsg_reply->reply_payload_rcv_len = 0;
2222 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2223 EXT_STATUS_OK;
2225 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2226 bsg_reply->result = DID_OK << 16;
2227 bsg_job_done(bsg_job, bsg_reply->result,
2228 bsg_reply->reply_payload_rcv_len);
2229 return 0;
2230 }
2232 static int
2233 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2234 {
2235 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2236 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2237 scsi_qla_host_t *vha = shost_priv(host);
2238 struct qla_hw_data *ha = vha->hw;
2239 struct qla_bbcr_data bbcr;
2240 uint16_t loop_id, topo, sw_cap;
2241 uint8_t domain, area, al_pa, state;
2242 int rval;
2244 if (!(IS_QLA27XX(ha)))
2245 return -EPERM;
2247 memset(&bbcr, 0, sizeof(bbcr));
2249 if (vha->flags.bbcr_enable)
2250 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2251 else
2252 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2254 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2255 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2256 &area, &domain, &topo, &sw_cap);
2257 if (rval != QLA_SUCCESS) {
2258 bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2259 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2260 bbcr.mbx1 = loop_id;
2261 goto done;
2262 }
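/* Decode the BBCR word: bit 12 holds the offline-state flag,
 * bits 11:8 the negotiated BBSCN and bits 3:0 the configured BBSCN.
 */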
2264 state = (vha->bbcr >> 12) & 0x1;
2266 if (state) {
2267 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2268 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2269 } else {
2270 bbcr.state = QLA_BBCR_STATE_ONLINE;
2271 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2272 }
2274 bbcr.configured_bbscn = vha->bbcr & 0xf;
2275 }
2277 done:
2278 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2279 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2280 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2282 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2284 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2285 bsg_reply->result = DID_OK << 16;
2286 bsg_job_done(bsg_job, bsg_reply->result,
2287 bsg_reply->reply_payload_rcv_len);
2288 return 0;
2291 static int
2292 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2293 {
2294 struct fc_bsg_request *bsg_request = bsg_job->request;
2295 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2296 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2297 scsi_qla_host_t *vha = shost_priv(host);
2298 struct qla_hw_data *ha = vha->hw;
2299 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2300 struct link_statistics *stats = NULL;
2301 dma_addr_t stats_dma;
2302 int rval;
2303 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
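/* Only the QL_VND_GET_PRIV_STATS_EX variant carries an options word
 * in vendor_cmd[1]; the plain variant defaults to options == 0.
 */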
2304 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2306 if (test_bit(UNLOADING, &vha->dpc_flags))
2307 return -ENODEV;
2309 if (unlikely(pci_channel_offline(ha->pdev)))
2310 return -ENODEV;
2312 if (qla2x00_reset_active(vha))
2313 return -EBUSY;
2315 if (!IS_FWI2_CAPABLE(ha))
2316 return -EPERM;
2318 stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
2319 &stats_dma, GFP_KERNEL);
2320 if (!stats) {
2321 ql_log(ql_log_warn, vha, 0x70e2,
2322 "Failed to allocate memory for stats.\n");
2323 return -ENOMEM;
2324 }
2326 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2328 if (rval == QLA_SUCCESS) {
2329 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
2330 (uint8_t *)stats, sizeof(*stats));
2331 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2332 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2333 }
2335 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2336 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2337 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2339 bsg_job->reply_len = sizeof(*bsg_reply);
2340 bsg_reply->result = DID_OK << 16;
2341 bsg_job_done(bsg_job, bsg_reply->result,
2342 bsg_reply->reply_payload_rcv_len);
2344 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2345 stats, stats_dma);
2347 return 0;
2348 }
2350 static int
2351 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2352 {
2353 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2354 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2355 scsi_qla_host_t *vha = shost_priv(host);
2356 int rval;
2357 struct qla_dport_diag *dd;
2359 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
2360 return -EPERM;
2362 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2363 if (!dd) {
2364 ql_log(ql_log_warn, vha, 0x70db,
2365 "Failed to allocate memory for dport.\n");
2366 return -ENOMEM;
2367 }
2369 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2370 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2372 rval = qla26xx_dport_diagnostics(
2373 vha, dd->buf, sizeof(dd->buf), dd->options);
2374 if (rval == QLA_SUCCESS) {
2375 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2376 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2377 }
2379 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2380 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2381 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2383 bsg_job->reply_len = sizeof(*bsg_reply);
2384 bsg_reply->result = DID_OK << 16;
2385 bsg_job_done(bsg_job, bsg_reply->result,
2386 bsg_reply->reply_payload_rcv_len);
2388 kfree(dd);
2390 return 0;
2391 }
2393 static int
2394 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2395 {
2396 struct fc_bsg_request *bsg_request = bsg_job->request;
2398 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2399 case QL_VND_LOOPBACK:
2400 return qla2x00_process_loopback(bsg_job);
2402 case QL_VND_A84_RESET:
2403 return qla84xx_reset(bsg_job);
2405 case QL_VND_A84_UPDATE_FW:
2406 return qla84xx_updatefw(bsg_job);
2408 case QL_VND_A84_MGMT_CMD:
2409 return qla84xx_mgmt_cmd(bsg_job);
2411 case QL_VND_IIDMA:
2412 return qla24xx_iidma(bsg_job);
2414 case QL_VND_FCP_PRIO_CFG_CMD:
2415 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2417 case QL_VND_READ_FLASH:
2418 return qla2x00_read_optrom(bsg_job);
2420 case QL_VND_UPDATE_FLASH:
2421 return qla2x00_update_optrom(bsg_job);
2423 case QL_VND_SET_FRU_VERSION:
2424 return qla2x00_update_fru_versions(bsg_job);
2426 case QL_VND_READ_FRU_STATUS:
2427 return qla2x00_read_fru_status(bsg_job);
2429 case QL_VND_WRITE_FRU_STATUS:
2430 return qla2x00_write_fru_status(bsg_job);
2432 case QL_VND_WRITE_I2C:
2433 return qla2x00_write_i2c(bsg_job);
2435 case QL_VND_READ_I2C:
2436 return qla2x00_read_i2c(bsg_job);
2438 case QL_VND_DIAG_IO_CMD:
2439 return qla24xx_process_bidir_cmd(bsg_job);
2441 case QL_VND_FX00_MGMT_CMD:
2442 return qlafx00_mgmt_cmd(bsg_job);
2444 case QL_VND_SERDES_OP:
2445 return qla26xx_serdes_op(bsg_job);
2447 case QL_VND_SERDES_OP_EX:
2448 return qla8044_serdes_op(bsg_job);
2450 case QL_VND_GET_FLASH_UPDATE_CAPS:
2451 return qla27xx_get_flash_upd_cap(bsg_job);
2453 case QL_VND_SET_FLASH_UPDATE_CAPS:
2454 return qla27xx_set_flash_upd_cap(bsg_job);
2456 case QL_VND_GET_BBCR_DATA:
2457 return qla27xx_get_bbcr_data(bsg_job);
2459 case QL_VND_GET_PRIV_STATS:
2460 case QL_VND_GET_PRIV_STATS_EX:
2461 return qla2x00_get_priv_stats(bsg_job);
2463 case QL_VND_DPORT_DIAGNOSTICS:
2464 return qla2x00_do_dport_diagnostics(bsg_job);
2466 default:
2467 return -ENOSYS;
2468 }
2469 }
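/*
 * qla24xx_bsg_request() - entry point for all FC BSG requests on this
 * host; dispatches on bsg_request->msgcode to the ELS, CT and vendor
 * specific handlers above.
 */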
2471 int
2472 qla24xx_bsg_request(struct bsg_job *bsg_job)
2473 {
2474 struct fc_bsg_request *bsg_request = bsg_job->request;
2475 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2476 int ret = -EINVAL;
2477 struct fc_rport *rport;
2478 struct Scsi_Host *host;
2479 scsi_qla_host_t *vha;
2481 /* In case no data is transferred. */
2482 bsg_reply->reply_payload_rcv_len = 0;
2484 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2485 rport = fc_bsg_to_rport(bsg_job);
2486 host = rport_to_shost(rport);
2487 vha = shost_priv(host);
2488 } else {
2489 host = fc_bsg_to_shost(bsg_job);
2490 vha = shost_priv(host);
2491 }
2493 if (qla2x00_chip_is_down(vha)) {
2494 ql_dbg(ql_dbg_user, vha, 0x709f,
2495 "BSG: ISP abort active/needed -- cmd=%d.\n",
2496 bsg_request->msgcode);
2497 return -EBUSY;
2498 }
2500 ql_dbg(ql_dbg_user, vha, 0x7000,
2501 "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2503 switch (bsg_request->msgcode) {
2504 case FC_BSG_RPT_ELS:
2505 case FC_BSG_HST_ELS_NOLOGIN:
2506 ret = qla2x00_process_els(bsg_job);
2507 break;
2508 case FC_BSG_HST_CT:
2509 ret = qla2x00_process_ct(bsg_job);
2510 break;
2511 case FC_BSG_HST_VENDOR:
2512 ret = qla2x00_process_vendor_specific(bsg_job);
2513 break;
2514 case FC_BSG_HST_ADD_RPORT:
2515 case FC_BSG_HST_DEL_RPORT:
2516 case FC_BSG_RPT_CT:
2517 default:
2518 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2519 break;
2520 }
2521 return ret;
2522 }
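/*
 * qla24xx_bsg_timeout() - abort a timed-out BSG command by locating
 * its SRB in the outstanding-command arrays and asking the firmware
 * to abort it.
 */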
2524 int
2525 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2526 {
2527 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2528 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2529 struct qla_hw_data *ha = vha->hw;
2530 srb_t *sp;
2531 int cnt, que;
2532 unsigned long flags;
2533 struct req_que *req;
2535 /* find the bsg job from the active list of commands */
2536 spin_lock_irqsave(&ha->hardware_lock, flags);
2537 for (que = 0; que < ha->max_req_queues; que++) {
2538 req = ha->req_q_map[que];
2539 if (!req)
2540 continue;
2542 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2543 sp = req->outstanding_cmds[cnt];
2544 if (sp) {
2545 if (((sp->type == SRB_CT_CMD) ||
2546 (sp->type == SRB_ELS_CMD_HST) ||
2547 (sp->type == SRB_FXIOCB_BCMD))
2548 && (sp->u.bsg_job == bsg_job)) {
2549 req->outstanding_cmds[cnt] = NULL;
2550 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2551 if (ha->isp_ops->abort_command(sp)) {
2552 ql_log(ql_log_warn, vha, 0x7089,
2553 "mbx abort_command "
2554 "failed.\n");
2555 bsg_reply->result = -EIO;
2556 } else {
2557 ql_dbg(ql_dbg_user, vha, 0x708a,
2558 "mbx abort_command "
2559 "success.\n");
2560 bsg_reply->result = 0;
2561 }
2562 spin_lock_irqsave(&ha->hardware_lock, flags);
2563 goto done;
2564 }
2565 }
2566 }
2567 }
2568 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2569 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2570 bsg_reply->result = -ENXIO;
2571 return 0;
2573 done:
2574 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2575 sp->free(sp);
2576 return 0;
2577 }