/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

/* BSG support for ELS/CT pass through */
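/*
 * Completion callbacks attached to each BSG srb: ->done posts the result
 * and signals the block layer via job_done(); ->free unmaps the payload
 * scatter/gather lists, releases any temporary fcport, and returns the
 * srb to its mempool.
 */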
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	bsg_job->reply->result = res;
	bsg_job->job_done(bsg_job);
	sp->free(vha, sp);
}
void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	struct qla_hw_data *ha = vha->hw;

	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	mempool_free(sp, vha->hw->srb_mempool);
}
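/*
 * Validate the FCP priority configuration read from flash: the buffer
 * must begin with the 'HQOS' signature and, when entry checking is
 * requested (flag == 1), contain at least one entry tagged valid.
 */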
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
	    bcode[3] != 'S') {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
		    len);
		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the existing
			 * fcp_prio_cfg is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	bsg_job->job_done(bsg_job);
	return ret;
}
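/*
 * ELS pass-through. For rport-directed ELS the remote port must already
 * be (or become) logged in; for host-based ELS a temporary fcport is
 * allocated and filled from the request's port_id.
 */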
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_job->request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_job->request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_job->request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
	    "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
		kfree(fcport);
done:
	return rval;
}
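/*
 * A CT pass-through command IOCB holds two data segment descriptors and
 * each continuation IOCB holds five more, hence the arithmetic below.
 */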
inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}
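/*
 * CT pass-through. The target loop id comes from the CT preamble:
 * 0xFC selects the directory (name) server, 0xFA the management server.
 */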
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
	srb_t *sp;
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s els type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}
/* Set the port configuration to enable the
 * internal loopback on ISP81XX
 */
static inline int
qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config)
{
	int ret = 0;
	int rval = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		goto done_set_internal;

	new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "State change notification not received.\n");
	} else
		ql_dbg(ql_dbg_user, vha, 0x7023,
		    "State change received.\n");

	ha->notify_dcbx_comp = 0;

done_set_internal:
	return rval;
}
/* Set the port configuration to disable the
 * internal loopback on ISP81XX
 */
static inline int
qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
	int wait)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (20 * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "State change notification not received.\n");
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "State change received.\n");

		ha->notify_dcbx_comp = 0;
	}
done_reset_internal:
	return rval;
}
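/*
 * Diagnostic loopback. On fabric-attached ports (and on ISP81xx/ISP8031
 * when the payload is a max-size ELS frame) an external loopback request
 * is serviced with an ECHO test instead; otherwise ISP81xx/ISP8031 ports
 * are switched into internal loopback first and restored afterwards.
 */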
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	if ((ha->current_topology == ISP_CFG_F ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
	    elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));
			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				bsg_job->reply->result = (DID_ERROR << 16);
				rval = -EPERM;
				goto done_free_dma_req;
			}

			if (elreq.options != EXTERNAL_LOOPBACK) {
				ql_dbg(ql_dbg_user, vha, 0x7020,
				    "Internal: current port config = %x\n",
				    config[0]);
				if (qla81xx_set_internal_loopback(vha, config,
				    new_config)) {
					ql_log(ql_log_warn, vha, 0x7024,
					    "Internal loopback failed.\n");
					bsg_job->reply->result =
					    (DID_ERROR << 16);
					rval = -EPERM;
					goto done_free_dma_req;
				}
			} else {
				/* For external loopback to work
				 * ensure internal loopback is disabled
				 */
				if (qla81xx_reset_internal_loopback(vha,
				    config, 1)) {
					bsg_job->reply->result =
					    (DID_ERROR << 16);
					rval = -EPERM;
					goto done_free_dma_req;
				}
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (new_config[0]) {
				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				qla81xx_reset_internal_loopback(vha,
				    new_config, 0);
			}

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (qla81xx_restart_mpi_firmware(vha) !=
				    QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x702a,
					    "MPI reset failed.\n");
				}

				bsg_job->reply->result = (DID_ERROR << 16);
				rval = -EIO;
				goto done_free_dma_req;
			}
		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);

		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(response) + sizeof(uint8_t);
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		bsg_job->reply->result = DID_OK;
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}
	bsg_job->job_done(bsg_job);

	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	return rval;
}
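/* Reset the ISP84xx companion chip, optionally bringing up the diagnostic
 * firmware, on behalf of the QL_VND_A84_RESET vendor command.
 */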
static int
qla84xx_reset(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	return rval;
}
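/* Stage a new ISP84xx firmware image in a coherent DMA buffer and hand it
 * to the chip with a VERIFY_CHIP IOCB.
 */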
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	return rval;
}
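/*
 * ISP84xx management command (memory read/write, info query, config
 * change). The DMA direction depends on the sub-command, so it is
 * recorded in dma_direction for the shared unmap path at the end.
 */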
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
	    sizeof(struct fc_bsg_request));
	if (!ql84_mgmt) {
		ql_log(ql_log_warn, vha, 0x703b,
		    "MGMT header not provided, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
			    ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
			    ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

	bsg_job->job_done(bsg_job);

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	return rval;
}
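/*
 * Get or set the iiDMA rate of a logged-in target port, looked up by
 * WWPN. On a get, the updated qla_port_param is returned after the
 * fc_bsg_reply.
 */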
static int
qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (struct qla_port_param *)((char *)bsg_job->request +
	    sizeof(struct fc_bsg_request));
	if (!port_param) {
		ql_log(ql_log_warn, vha, 0x7047,
		    "port_param header not provided.\n");
		return -EINVAL;
	}

	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;
		break;
	}

	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
		    "%04x %x %04x %04x.\n", fcport->port_name[0],
		    fcport->port_name[1], fcport->port_name[2],
		    fcport->port_name[3], fcport->port_name[4],
		    fcport->port_name[5], fcport->port_name[6],
		    fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);

		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_job->reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	return rval;
}
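/*
 * Common setup for the flash read/update vendor commands: validate the
 * requested option-ROM region, clamp its size, and allocate the staging
 * buffer.
 */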
static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
	return 0;
}
static int
qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.isp82xx_reset_hdlr_active)
		return -EBUSY;

	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval)
		return rval;

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	bsg_job->job_done(bsg_job);
	return rval;
}
static int
qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval)
		return rval;

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	bsg_job->job_done(bsg_job);
	return rval;
}
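/*
 * FRU (field-replaceable unit) helpers: version records and status
 * registers are moved through a small dma-pool bounce buffer using the
 * SFP read/write mailbox interface.
 */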
static int
qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);

	return 0;
}
static int
qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);

	return 0;
}
static int
qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);

	return 0;
}
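/* Dispatch FC_BSG_HST_VENDOR requests: vendor_cmd[0] selects the handler. */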
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	default:
		bsg_job->reply->result = (DID_ERROR << 16);
		bsg_job->job_done(bsg_job);
		return -ENOSYS;
	}
}
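/* Entry point for all BSG requests handed to the driver by the FC transport. */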
int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
	int ret = -EINVAL;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_job->reply->reply_payload_rcv_len = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
	}

	if (qla2x00_reset_active(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_job->request->msgcode);
		bsg_job->reply->result = (DID_ERROR << 16);
		bsg_job->job_done(bsg_job);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);

	switch (bsg_job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		bsg_job->reply->result = ret;
		break;
	}
	return ret;
}
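/*
 * BSG timeout handler: scan every request queue's outstanding commands
 * for the srb owning this job and abort it through the firmware.
 */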
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				if (((sp->type == SRB_CT_CMD) ||
				    (sp->type == SRB_ELS_CMD_HST))
				    && (sp->u.bsg_job == bsg_job)) {
					spin_unlock_irqrestore(
					    &ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					spin_lock_irqsave(
					    &ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}