/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

/* BSG support for ELS/CT pass through */
void
qla2x00_bsg_job_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	sp->free(sp);
}
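
/*
 * Note on the two SRB callbacks wired up throughout this file: ->done()
 * (above) only completes the bsg request toward the midlayer, while the
 * paired ->free() callback (below) is what releases the DMA mappings and
 * the SRB itself.
 */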
void
qla2x00_bsg_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);

	qla2x00_rel_sp(sp);
}
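
/*
 * The fcport is freed above only for request types that allocated a dummy
 * fcport of their own (CT, FX IOCB, and host-based ELS); for rport-based
 * ELS the fcport belongs to the rport and must survive the request.
 */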
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
	    bcode[3] != 'S') {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
		    len);
		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalidated,
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	    "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (bsg_request->msgcode == FC_BSG_RPT_ELS)
		kfree(fcport);
done:
	return rval;
}
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}
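
/*
 * Worked example of the arithmetic above: the first (command) IOCB
 * carries 2 data segment descriptors and each continuation IOCB carries
 * 5 more, so dsds = 12 needs 1 + (12 - 2) / 5 = 3 IOCBs, while dsds = 13
 * needs a fourth for the one leftover descriptor.
 */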
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s else type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
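
/*
 * The wait/wait2 flags above gate two separate firmware events: wait arms
 * the DCBX-complete wait and wait2 the loopback-port-up wait. Both
 * notify_* fields are cleared again on every exit path so a stale
 * completion cannot fire into a later request.
 */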
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
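
/*
 * The wait loop above honors idc_extend_tmo: if firmware asks for more
 * time (via an IDC AEN) before the DCBX completion arrives, the wait is
 * restarted with the extended timeout instead of failing outright.
 */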
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    (ha->current_topology == ISP_CFG_F ||
	    (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
	    elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
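
/*
 * Reply layout used above: a struct fc_bsg_reply, immediately followed by
 * the raw mailbox response array, followed by one byte recording which
 * diagnostic command (echo vs. loopback) was actually sent.
 */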
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
			    ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
			    ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
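
/*
 * dma_direction above doubles as state for the unwind path: it records
 * which payload (if any) was mapped in the switch so the common exit code
 * can unmap exactly that one.
 */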
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iIDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
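
/*
 * For the get case (port_param->mode == 0) the negotiated speed is
 * returned by appending the updated qla_port_param directly after the
 * fc_bsg_reply in the reply buffer, mirroring how userspace passed it in.
 */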
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}
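
/*
 * Region-size clamp above, worked through with example numbers: with
 * optrom_size 0x200000, start 0x1F0000 and a 0x20000-byte payload,
 * start + len (0x210000) exceeds the flash, so the region is trimmed to
 * optrom_size - start = 0x10000 bytes; otherwise the payload length is
 * used as-is.
 */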
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;

	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2p\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/*Populate srb->ctx with bidir ctx*/
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
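
/*
 * Unlike most handlers in this file, the bidir path reports failures
 * through vendor_rsp[0] while always completing the request with DID_OK,
 * so userspace must inspect the vendor status rather than the midlayer
 * result.
 */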
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7020,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
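
/*
 * Accept a flash-update capabilities request only if the capabilities
 * word matches what the running firmware reports and the requested
 * outage duration is at least MAX_LOOP_TIMEOUT.
 */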
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
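
/*
 * Return buffer-to-buffer credit recovery (BBCR) status: enabled state,
 * the negotiated and configured BBSCN values decoded from vha->bbcr,
 * and an offline reason code when the login was rejected.
 */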
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;
		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
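
/*
 * Fetch the ISP link statistics into a DMA-coherent buffer and copy
 * them out through the reply payload. QL_VND_GET_PRIV_STATS_EX passes
 * an extra options word in vendor_cmd[1].
 */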
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
	    &stats_dma, GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
		    (uint8_t *)stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}
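
/*
 * Run D-Port diagnostics (ISP83xx/ISP27xx only) and hand the result
 * buffer back to the caller.
 */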
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}
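
/*
 * Dispatch an FC_BSG_HST_VENDOR request to its handler based on
 * vendor_cmd[0].
 */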
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	default:
		return -ENOSYS;
	}
}
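
/*
 * Entry point for all BSG requests. Resolve the owning scsi_qla_host
 * (via the rport for ELS pass-through), reject the request while the
 * chip is down, then fan out on the message code.
 */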
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (!sp)
				continue;

			if (((sp->type == SRB_CT_CMD) ||
			    (sp->type == SRB_ELS_CMD_HST) ||
			    (sp->type == SRB_FXIOCB_BCMD)) &&
			    (sp->u.bsg_job == bsg_job)) {
				req->outstanding_cmds[cnt] = NULL;
				spin_unlock_irqrestore(&ha->hardware_lock, flags);
				if (ha->isp_ops->abort_command(sp)) {
					ql_log(ql_log_warn, vha, 0x7089,
					    "mbx abort_command failed.\n");
					bsg_reply->result = -EIO;
				} else {
					ql_dbg(ql_dbg_user, vha, 0x708a,
					    "mbx abort_command success.\n");
					bsg_reply->result = 0;
				}
				spin_lock_irqsave(&ha->hardware_lock, flags);
				goto done;
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);