/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>
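/*
 * Overview (editor's note): this file implements the FC block-SCSI-generic
 * (bsg) interface of the driver -- ELS/CT pass-through, diagnostic
 * echo/loopback tests, ISP84xx management, option ROM and FRU updates,
 * i2c and serdes register access, and related vendor-specific commands
 * submitted from user space via the fc_bsg framework.
 */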
static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport), free_work);

	qla2x00_free_fcport(fcport);
}
/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	sp->free(sp);
}
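/*
 * Note: the functions below set sp->done to qla2x00_bsg_job_done(), so it
 * normally runs from the command completion path once the firmware has
 * finished the pass-through request; it completes the bsg job and then
 * releases the SRB through sp->free.
 */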
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}
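/*
 * Editor's note: the dummy fcport allocated for host-based CT/ELS/FX
 * commands is not freed inline above; freeing is deferred to
 * qla2xxx_free_fcport_work() on ha->wq, presumably because this release
 * path can run in a context where freeing the fcport directly is unsafe.
 */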
static int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
			bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (bsg_request->msgcode == FC_BSG_RPT_ELS)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}
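/*
 * Worked example for the IOCB math above: with dsds = 12 the result is
 * 1 + (10 / 5) = 3, and with dsds = 13 the remainder adds one more
 * continuation IOCB, i.e. 4 -- consistent with the command IOCB holding
 * two data segment descriptors and each continuation IOCB holding five.
 */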
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
		(bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
			>> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s else type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
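/*
 * Editor's note on the wait loop above: the firmware may stretch the DCBX
 * wait by setting ha->idc_extend_tmo while we sleep; in that case the loop
 * re-arms wait_for_completion_timeout() with the extended timeout instead
 * of giving up at the initial DCBX_COMP_TIMEOUT.
 */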
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
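/*
 * Reply layout used above: the fc_bsg_reply structure is followed by the
 * raw mailbox response registers and then a single trailing byte that
 * records which diagnostic command (echo or loopback) was actually issued.
 */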
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
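/*
 * Editor's note: dma_direction above records which payload (if any) was
 * mapped for the selected sub-command, so the common cleanup path unmaps
 * the correct scatter/gather list and leaves the other payload untouched.
 */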
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
				sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
				sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}
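/*
 * Region sizing above clamps the transfer to the flash boundary:
 * optrom_region_size ends up as min(payload_len, optrom_size - start),
 * so a request that runs past the end of the option ROM is truncated
 * rather than rejected.
 */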
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2p\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Populate srb->ctx with bidir ctx */
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7020,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
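
/*
 * QL_VND_SET_FLASH_UPDATE_CAPS: validate the caller-supplied capability
 * word against the running firmware and check the requested outage
 * duration before accepting it.
 */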
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
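
/*
 * QL_VND_GET_BBCR_DATA: report buffer-to-buffer credit recovery (BBCR)
 * status and state, plus the negotiated/configured BB_SC_N values.
 */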
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
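
/*
 * QL_VND_GET_PRIV_STATS[_EX]: fetch ISP link statistics into a DMA
 * buffer and return them in the reply payload.
 */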
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
		    stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}
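
/* QL_VND_DPORT_DIAGNOSTICS: run D_Port diagnostics and return the result buffer. */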
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}
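
/*
 * QL_VND_SS_GET_FLASH_IMAGE_STATUS: report which flash regions are
 * active (plus the aux regions on ISP28xx).
 */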
static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_hw_data *ha = vha->hw;
	struct qla_active_regions regions = { };
	struct active_regions active_regions = { };

	qla27xx_get_active_image(vha, &active_regions);
	regions.global_image = active_regions.global;

	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		regions.board_config = active_regions.aux.board_config;
		regions.vpd_nvram = active_regions.aux.vpd_nvram;
		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
	}

	ql_dbg(ql_dbg_user, vha, 0x70e1,
	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
	    __func__, vha->host_no, regions.global_image,
	    regions.board_config, regions.vpd_nvram,
	    regions.npiv_config_0_1, regions.npiv_config_2_3);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len = sizeof(regions);
	bsg_reply->result = DID_OK << 16;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
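
/* Dispatch FC_BSG_HST_VENDOR sub-commands to their handlers. */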
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
		return qla2x00_get_flash_image_status(bsg_job);

	default:
		return -ENOSYS;
	}
}
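
/* Entry point for BSG requests from the FC transport. */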
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_RPT_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}
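
/*
 * BSG timeout handler: find the timed-out job among the outstanding
 * commands and abort it.
 */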
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (!sp)
				continue;

			if (((sp->type == SRB_CT_CMD) ||
			    (sp->type == SRB_ELS_CMD_HST) ||
			    (sp->type == SRB_FXIOCB_BCMD))
			    && (sp->u.bsg_job == bsg_job)) {
				req->outstanding_cmds[cnt] = NULL;
				spin_unlock_irqrestore(&ha->hardware_lock, flags);
				if (ha->isp_ops->abort_command(sp)) {
					ql_log(ql_log_warn, vha, 0x7089,
					    "mbx abort_command failed.\n");
					bsg_reply->result = -EIO;
				} else {
					ql_dbg(ql_dbg_user, vha, 0x708a,
					    "mbx abort_command success.\n");
					bsg_reply->result = 0;
				}
				spin_lock_irqsave(&ha->hardware_lock, flags);
				goto done;
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(sp);
	return 0;
}
);