/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

/* BSG support for ELS/CT pass through */
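/*
 * Completion and cleanup callbacks shared by all BSG-backed SRBs:
 * qla2x00_bsg_job_done() posts the result and completes the bsg_job,
 * qla2x00_bsg_sp_free() tears down the DMA mappings and releases the SRB.
 */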
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	bsg_job->reply->result = res;
	bsg_job->job_done(bsg_job);
	sp->free(vha, sp);
}
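/*
 * Release DMA mappings and the SRB once a BSG pass-through command has
 * completed.  FX00 bi-directional commands unmap only the scatterlists
 * that were actually mapped (flag-driven); all other command types
 * unmap both request and reply payloads unconditionally.
 */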
void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	struct qla_hw_data *ha = vha->hw;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	qla2x00_rel_sp(vha, sp);
}
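/*
 * Sanity-check FCP priority configuration data read from flash:
 * reject an erased region (all 0xFF), verify the "HQOS" signature,
 * and, when @flag is set, require at least one tagged-valid entry.
 */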
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
	    bcode[3] != 'S') {
		/* Invalid FCP priority data header*/
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}
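/*
 * Vendor sub-command handler for FCP priority configuration: enable or
 * disable the feature, or get/set the config region, validating newly
 * written data before turning the feature on.
 */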
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid, the
			 * fcp_prio_cfg data is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}

exit_fcp_prio_cfg:
	if (!ret)
		bsg_job->job_done(bsg_job);
	return ret;
}
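/*
 * Build and issue an ELS pass-through, either to an existing rport or
 * host-based with no prior login, mapping the request/reply
 * scatterlists for DMA and handing the SRB to the firmware.
 */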
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
			bsg_job->request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_job->request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_job->request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
		 "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(vha, sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (bsg_job->request->msgcode != FC_BSG_RPT_ELS)
		kfree(fcport);
done:
	return rval;
}
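/*
 * A CT command IOCB carries two data segment descriptors; each
 * continuation IOCB carries five more.  Compute the total number of
 * IOCBs needed for the given DSD count.
 */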
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}
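/*
 * CT pass-through: map the payload scatterlists, derive the loop id of
 * the target server (SNS or management server) from the CT preamble,
 * and issue the command through a dummy fcport.
 */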
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
	srb_t *sp;
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
		(bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
			>> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s else type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(vha, sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}
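/*
 * Loopback (diagnostic) support for ISP81xx/83xx/8044 CNAs: the port
 * config words are rewritten to enter or exit internal/external
 * loopback, with DCBX and port-up completion events used for
 * synchronization.
 */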
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
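/*
 * Vendor loopback/echo diagnostic: copy the request payload into a
 * DMA-coherent buffer, run either an ECHO test (switch-connected
 * topology) or an internal/external loopback test, and return the
 * received data plus the mailbox status words to user space.
 */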
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    (ha->current_topology == ISP_CFG_F ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
	    elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
		bsg_job->reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_job->reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
	    sizeof(struct fc_bsg_reply);
	memcpy(fw_sts_ptr, response, sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job->job_done(bsg_job);
	return rval;
}
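/*
 * ISP84xx chip reset via the vendor interface; vendor_cmd[1] selects
 * whether the diagnostic firmware is loaded after the reset.
 */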
static int
qla84xx_reset(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_job->reply->result = DID_OK;
		bsg_job->job_done(bsg_job);
	}

	return rval;
}
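/*
 * Download new ISP84xx firmware: the image is gathered into a coherent
 * DMA buffer and handed to the chip with a VERIFY CHIP IOCB.
 */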
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job->job_done(bsg_job);
	return rval;
}
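/*
 * ISP84xx management pass-through (read/write memory, get info, change
 * config) implemented with an ACCESS CHIP IOCB; the DMA direction and
 * buffer depend on the requested sub-command.
 */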
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
			    ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
			    ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job->job_done(bsg_job);
	return rval;
}
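/*
 * Get or set the iiDMA (intelligent interleaved DMA) port speed for a
 * logged-in target identified by WWPN; on a get, the updated
 * qla_port_param is copied back after the fc_bsg_reply.
 */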
static int
qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iIDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_job->reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->job_done(bsg_job);
	}

	return rval;
}
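/*
 * Validate the requested flash region and allocate the staging buffer
 * shared by the option-ROM read and update paths; sets optrom_state to
 * QLA_SREADING or QLA_SWRITING accordingly.
 */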
static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
	return 0;
}
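/*
 * Read the requested option-ROM (flash) region into the staging buffer
 * and copy it out to the reply payload, serialized by optrom_mutex.
 */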
static int
qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job->job_done(bsg_job);
	return rval;
}
static int
qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job->job_done(bsg_job);
	return rval;
}
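/*
 * The FRU/SFP helpers below exchange small fixed-size structures with
 * the transceiver registers via a buffer from s_dma_pool; the vendor
 * status word (vendor_rsp[0]) carries EXT_STATUS_NO_MEMORY on
 * allocation failure, EXT_STATUS_MAILBOX on mailbox failure, and 0 on
 * success.
 */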
static int
qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;

	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
static int
qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
static int
qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
static int
qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
static int
qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
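/*
 * Bidirectional (DIAG IO) pass-through: after extensive sanity checks
 * (adapter type, link/topology state, P2P mode, self-login), both
 * payloads are DMA-mapped and a SRB_BIDI_CMD SRB is started; the bsg
 * request completes from the interrupt handler.
 */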
static int
qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2p\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/*Populate srb->ctx with bidir ctx*/
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = 0;
	bsg_job->reply->result = (DID_OK) << 16;
	bsg_job->job_done(bsg_job);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
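/*
 * ISPFx00 management pass-through: the IOCB request block arrives in
 * vendor_cmd[1]; the request/reply payloads are mapped only when the
 * corresponding SRB_FXDISC_*_DMA_VALID flags are set.
 */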
static int
qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
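/*
 * SerDes register access (read or write a single register word):
 * qla26xx_serdes_op uses the qla2x00_{read,write}_serdes_word mailbox
 * helpers, while qla8044_serdes_op is the ISP8044 variant with an
 * extended register structure.
 */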
static int
qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_job->reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
static int
qla8044_serdes_op(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_job->reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70cf,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
static int
qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
			   (uint64_t)ha->fw_attributes_ext[0] << 32 |
			   (uint64_t)ha->fw_attributes_h << 16 |
			   (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_job->reply->reply_payload_rcv_len = sizeof(cap);

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
static int
qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
			 (uint64_t)ha->fw_attributes_ext[0] << 32 |
			 (uint64_t)ha->fw_attributes_h << 16 |
			 (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_job->reply->reply_payload_rcv_len = 0;

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
static int
qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
static int
qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval = QLA_FUNCTION_FAILED;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		goto done;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto done;

	if (qla2x00_reset_active(vha))
		goto done;

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	stats = dma_alloc_coherent(&ha->pdev->dev,
	    sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		goto done;
	}

	memset(stats, 0, sizeof(struct link_statistics));

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);

	if (rval != QLA_SUCCESS)
		goto done_free;

	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
	    (uint8_t *)stats, sizeof(struct link_statistics));

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, stats,
	    sizeof(struct link_statistics));
	bsg_job->reply->reply_payload_rcv_len = sizeof(struct link_statistics);

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);

done_free:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
	    stats, stats_dma);
done:
	return rval;
}
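/*
 * Dispatch an FC_BSG_HST_VENDOR request to the matching handler based
 * on vendor_cmd[0].
 */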
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
		return qla2x00_get_priv_stats(bsg_job);

	default:
		return -ENOSYS;
	}
}
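/*
 * Entry point for all BSG requests routed to this driver by the FC
 * transport; rejects new work while an ISP abort is active or pending.
 */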
int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_job->reply->reply_payload_rcv_len = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
	}

	if (qla2x00_reset_active(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_job->request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);

	switch (bsg_job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}
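/*
 * BSG timeout handler: locate the SRB that owns the timed-out bsg_job
 * among the outstanding commands of every request queue and abort it.
 */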
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (!sp)
				continue;

			if (((sp->type == SRB_CT_CMD) ||
			    (sp->type == SRB_ELS_CMD_HST) ||
			    (sp->type == SRB_FXIOCB_BCMD))
			    && (sp->u.bsg_job == bsg_job)) {
				req->outstanding_cmds[cnt] = NULL;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				if (ha->isp_ops->abort_command(sp)) {
					ql_log(ql_log_warn, vha, 0x7089,
					    "mbx abort_command failed.\n");
					bsg_job->req->errors =
					bsg_job->reply->result = -EIO;
				} else {
					ql_dbg(ql_dbg_user, vha, 0x708a,
					    "mbx abort_command success.\n");
					bsg_job->req->errors =
					bsg_job->reply->result = 0;
				}
				spin_lock_irqsave(&ha->hardware_lock, flags);
				goto done;
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(vha, sp);
	return 0;
}