2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
13 /* BSG support for ELS/CT pass through */
15 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t
*vha
, fc_port_t
*fcport
, size_t size
)
18 struct qla_hw_data
*ha
= vha
->hw
;
21 sp
= mempool_alloc(ha
->srb_mempool
, GFP_KERNEL
);
24 ctx
= kzalloc(size
, GFP_KERNEL
);
26 mempool_free(sp
, ha
->srb_mempool
);
31 memset(sp
, 0, sizeof(*sp
));
39 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t
*vha
,
40 struct qla_fcp_prio_cfg
*pri_cfg
, uint8_t flag
)
42 int i
, ret
, num_valid
;
44 struct qla_fcp_prio_entry
*pri_entry
;
45 uint32_t *bcode_val_ptr
, bcode_val
;
49 bcode
= (uint8_t *)pri_cfg
;
50 bcode_val_ptr
= (uint32_t *)pri_cfg
;
51 bcode_val
= (uint32_t)(*bcode_val_ptr
);
53 if (bcode_val
== 0xFFFFFFFF) {
54 /* No FCP Priority config data in flash */
55 ql_dbg(ql_dbg_user
, vha
, 0x7051,
56 "No FCP Priority config data.\n");
60 if (bcode
[0] != 'H' || bcode
[1] != 'Q' || bcode
[2] != 'O' ||
62 /* Invalid FCP priority data header*/
63 ql_dbg(ql_dbg_user
, vha
, 0x7052,
64 "Invalid FCP Priority data header. bcode=0x%x.\n",
71 pri_entry
= &pri_cfg
->entry
[0];
72 for (i
= 0; i
< pri_cfg
->num_entries
; i
++) {
73 if (pri_entry
->flags
& FCP_PRIO_ENTRY_TAG_VALID
)
79 /* No valid FCP priority data entries */
80 ql_dbg(ql_dbg_user
, vha
, 0x7053,
81 "No valid FCP Priority data entries.\n");
84 /* FCP priority data is valid */
85 ql_dbg(ql_dbg_user
, vha
, 0x7054,
86 "Valid FCP priority data. num entries = %d.\n",
94 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job
*bsg_job
)
96 struct Scsi_Host
*host
= bsg_job
->shost
;
97 scsi_qla_host_t
*vha
= shost_priv(host
);
98 struct qla_hw_data
*ha
= vha
->hw
;
103 bsg_job
->reply
->reply_payload_rcv_len
= 0;
105 if (!(IS_QLA24XX_TYPE(ha
) || IS_QLA25XX(ha
))) {
107 goto exit_fcp_prio_cfg
;
110 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
111 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
112 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
114 goto exit_fcp_prio_cfg
;
117 /* Get the sub command */
118 oper
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
120 /* Only set config is allowed if config memory is not allocated */
121 if (!ha
->fcp_prio_cfg
&& (oper
!= QLFC_FCP_PRIO_SET_CONFIG
)) {
123 goto exit_fcp_prio_cfg
;
126 case QLFC_FCP_PRIO_DISABLE
:
127 if (ha
->flags
.fcp_prio_enabled
) {
128 ha
->flags
.fcp_prio_enabled
= 0;
129 ha
->fcp_prio_cfg
->attributes
&=
130 ~FCP_PRIO_ATTR_ENABLE
;
131 qla24xx_update_all_fcp_prio(vha
);
132 bsg_job
->reply
->result
= DID_OK
;
135 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
136 goto exit_fcp_prio_cfg
;
140 case QLFC_FCP_PRIO_ENABLE
:
141 if (!ha
->flags
.fcp_prio_enabled
) {
142 if (ha
->fcp_prio_cfg
) {
143 ha
->flags
.fcp_prio_enabled
= 1;
144 ha
->fcp_prio_cfg
->attributes
|=
145 FCP_PRIO_ATTR_ENABLE
;
146 qla24xx_update_all_fcp_prio(vha
);
147 bsg_job
->reply
->result
= DID_OK
;
150 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
151 goto exit_fcp_prio_cfg
;
156 case QLFC_FCP_PRIO_GET_CONFIG
:
157 len
= bsg_job
->reply_payload
.payload_len
;
158 if (!len
|| len
> FCP_PRIO_CFG_SIZE
) {
160 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
161 goto exit_fcp_prio_cfg
;
164 bsg_job
->reply
->result
= DID_OK
;
165 bsg_job
->reply
->reply_payload_rcv_len
=
167 bsg_job
->reply_payload
.sg_list
,
168 bsg_job
->reply_payload
.sg_cnt
, ha
->fcp_prio_cfg
,
173 case QLFC_FCP_PRIO_SET_CONFIG
:
174 len
= bsg_job
->request_payload
.payload_len
;
175 if (!len
|| len
> FCP_PRIO_CFG_SIZE
) {
176 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
178 goto exit_fcp_prio_cfg
;
181 if (!ha
->fcp_prio_cfg
) {
182 ha
->fcp_prio_cfg
= vmalloc(FCP_PRIO_CFG_SIZE
);
183 if (!ha
->fcp_prio_cfg
) {
184 ql_log(ql_log_warn
, vha
, 0x7050,
185 "Unable to allocate memory for fcp prio "
186 "config data (%x).\n", FCP_PRIO_CFG_SIZE
);
187 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
189 goto exit_fcp_prio_cfg
;
193 memset(ha
->fcp_prio_cfg
, 0, FCP_PRIO_CFG_SIZE
);
194 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
195 bsg_job
->request_payload
.sg_cnt
, ha
->fcp_prio_cfg
,
198 /* validate fcp priority data */
200 if (!qla24xx_fcp_prio_cfg_valid(vha
,
201 (struct qla_fcp_prio_cfg
*) ha
->fcp_prio_cfg
, 1)) {
202 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
204 /* If buffer was invalidatic int
205 * fcp_prio_cfg is of no use
207 vfree(ha
->fcp_prio_cfg
);
208 ha
->fcp_prio_cfg
= NULL
;
209 goto exit_fcp_prio_cfg
;
212 ha
->flags
.fcp_prio_enabled
= 0;
213 if (ha
->fcp_prio_cfg
->attributes
& FCP_PRIO_ATTR_ENABLE
)
214 ha
->flags
.fcp_prio_enabled
= 1;
215 qla24xx_update_all_fcp_prio(vha
);
216 bsg_job
->reply
->result
= DID_OK
;
223 bsg_job
->job_done(bsg_job
);
227 qla2x00_process_els(struct fc_bsg_job
*bsg_job
)
229 struct fc_rport
*rport
;
230 fc_port_t
*fcport
= NULL
;
231 struct Scsi_Host
*host
;
232 scsi_qla_host_t
*vha
;
233 struct qla_hw_data
*ha
;
236 int req_sg_cnt
, rsp_sg_cnt
;
237 int rval
= (DRIVER_ERROR
<< 16);
238 uint16_t nextlid
= 0;
241 if (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
) {
242 rport
= bsg_job
->rport
;
243 fcport
= *(fc_port_t
**) rport
->dd_data
;
244 host
= rport_to_shost(rport
);
245 vha
= shost_priv(host
);
247 type
= "FC_BSG_RPT_ELS";
249 host
= bsg_job
->shost
;
250 vha
= shost_priv(host
);
252 type
= "FC_BSG_HST_ELS_NOLOGIN";
255 /* pass through is supported only for ISP 4Gb or higher */
256 if (!IS_FWI2_CAPABLE(ha
)) {
257 ql_dbg(ql_dbg_user
, vha
, 0x7001,
258 "ELS passthru not supported for ISP23xx based adapters.\n");
263 /* Multiple SG's are not supported for ELS requests */
264 if (bsg_job
->request_payload
.sg_cnt
> 1 ||
265 bsg_job
->reply_payload
.sg_cnt
> 1) {
266 ql_dbg(ql_dbg_user
, vha
, 0x7002,
267 "Multiple SG's are not suppored for ELS requests, "
268 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
269 bsg_job
->request_payload
.sg_cnt
,
270 bsg_job
->reply_payload
.sg_cnt
);
275 /* ELS request for rport */
276 if (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
) {
277 /* make sure the rport is logged in,
278 * if not perform fabric login
280 if (qla2x00_fabric_login(vha
, fcport
, &nextlid
)) {
281 ql_dbg(ql_dbg_user
, vha
, 0x7003,
282 "Failed to login port %06X for ELS passthru.\n",
288 /* Allocate a dummy fcport structure, since functions
289 * preparing the IOCB and mailbox command retrieves port
290 * specific information from fcport structure. For Host based
291 * ELS commands there will be no fcport structure allocated
293 fcport
= qla2x00_alloc_fcport(vha
, GFP_KERNEL
);
299 /* Initialize all required fields of fcport */
301 fcport
->vp_idx
= vha
->vp_idx
;
302 fcport
->d_id
.b
.al_pa
=
303 bsg_job
->request
->rqst_data
.h_els
.port_id
[0];
304 fcport
->d_id
.b
.area
=
305 bsg_job
->request
->rqst_data
.h_els
.port_id
[1];
306 fcport
->d_id
.b
.domain
=
307 bsg_job
->request
->rqst_data
.h_els
.port_id
[2];
309 (fcport
->d_id
.b
.al_pa
== 0xFD) ?
310 NPH_FABRIC_CONTROLLER
: NPH_F_PORT
;
313 if (!vha
->flags
.online
) {
314 ql_log(ql_log_warn
, vha
, 0x7005, "Host not online.\n");
320 dma_map_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
321 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
324 goto done_free_fcport
;
327 rsp_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
328 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
331 goto done_free_fcport
;
334 if ((req_sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) ||
335 (rsp_sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
)) {
336 ql_log(ql_log_warn
, vha
, 0x7008,
337 "dma mapping resulted in different sg counts, "
338 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
339 "dma_reply_sg_cnt:%x.\n", bsg_job
->request_payload
.sg_cnt
,
340 req_sg_cnt
, bsg_job
->reply_payload
.sg_cnt
, rsp_sg_cnt
);
345 /* Alloc SRB structure */
346 sp
= qla2x00_get_ctx_bsg_sp(vha
, fcport
, sizeof(struct srb_ctx
));
354 (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
?
355 SRB_ELS_CMD_RPT
: SRB_ELS_CMD_HST
);
357 (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
?
358 "bsg_els_rpt" : "bsg_els_hst");
359 els
->u
.bsg_job
= bsg_job
;
361 ql_dbg(ql_dbg_user
, vha
, 0x700a,
362 "bsg rqst type: %s els type: %x - loop-id=%x "
363 "portid=%-2x%02x%02x.\n", type
,
364 bsg_job
->request
->rqst_data
.h_els
.command_code
, fcport
->loop_id
,
365 fcport
->d_id
.b
.domain
, fcport
->d_id
.b
.area
, fcport
->d_id
.b
.al_pa
);
367 rval
= qla2x00_start_sp(sp
);
368 if (rval
!= QLA_SUCCESS
) {
369 ql_log(ql_log_warn
, vha
, 0x700e,
370 "qla2x00_start_sp failed = %d\n", rval
);
372 mempool_free(sp
, ha
->srb_mempool
);
379 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
380 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
381 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
382 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
383 goto done_free_fcport
;
386 if (bsg_job
->request
->msgcode
== FC_BSG_HST_ELS_NOLOGIN
)
393 qla2x00_process_ct(struct fc_bsg_job
*bsg_job
)
396 struct Scsi_Host
*host
= bsg_job
->shost
;
397 scsi_qla_host_t
*vha
= shost_priv(host
);
398 struct qla_hw_data
*ha
= vha
->hw
;
399 int rval
= (DRIVER_ERROR
<< 16);
400 int req_sg_cnt
, rsp_sg_cnt
;
402 struct fc_port
*fcport
;
403 char *type
= "FC_BSG_HST_CT";
407 dma_map_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
408 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
410 ql_log(ql_log_warn
, vha
, 0x700f,
411 "dma_map_sg return %d for request\n", req_sg_cnt
);
416 rsp_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
417 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
419 ql_log(ql_log_warn
, vha
, 0x7010,
420 "dma_map_sg return %d for reply\n", rsp_sg_cnt
);
425 if ((req_sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) ||
426 (rsp_sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
)) {
427 ql_log(ql_log_warn
, vha
, 0x7011,
428 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
429 "dma_reply_sg_cnt: %x\n", bsg_job
->request_payload
.sg_cnt
,
430 req_sg_cnt
, bsg_job
->reply_payload
.sg_cnt
, rsp_sg_cnt
);
435 if (!vha
->flags
.online
) {
436 ql_log(ql_log_warn
, vha
, 0x7012,
437 "Host is not online.\n");
443 (bsg_job
->request
->rqst_data
.h_ct
.preamble_word1
& 0xFF000000)
447 loop_id
= cpu_to_le16(NPH_SNS
);
450 loop_id
= vha
->mgmt_svr_loop_id
;
453 ql_dbg(ql_dbg_user
, vha
, 0x7013,
454 "Unknown loop id: %x.\n", loop_id
);
459 /* Allocate a dummy fcport structure, since functions preparing the
460 * IOCB and mailbox command retrieves port specific information
461 * from fcport structure. For Host based ELS commands there will be
462 * no fcport structure allocated
464 fcport
= qla2x00_alloc_fcport(vha
, GFP_KERNEL
);
466 ql_log(ql_log_warn
, vha
, 0x7014,
467 "Failed to allocate fcport.\n");
472 /* Initialize all required fields of fcport */
474 fcport
->vp_idx
= vha
->vp_idx
;
475 fcport
->d_id
.b
.al_pa
= bsg_job
->request
->rqst_data
.h_ct
.port_id
[0];
476 fcport
->d_id
.b
.area
= bsg_job
->request
->rqst_data
.h_ct
.port_id
[1];
477 fcport
->d_id
.b
.domain
= bsg_job
->request
->rqst_data
.h_ct
.port_id
[2];
478 fcport
->loop_id
= loop_id
;
480 /* Alloc SRB structure */
481 sp
= qla2x00_get_ctx_bsg_sp(vha
, fcport
, sizeof(struct srb_ctx
));
483 ql_log(ql_log_warn
, vha
, 0x7015,
484 "qla2x00_get_ctx_bsg_sp failed.\n");
486 goto done_free_fcport
;
490 ct
->type
= SRB_CT_CMD
;
492 ct
->u
.bsg_job
= bsg_job
;
494 ql_dbg(ql_dbg_user
, vha
, 0x7016,
495 "bsg rqst type: %s else type: %x - "
496 "loop-id=%x portid=%02x%02x%02x.\n", type
,
497 (bsg_job
->request
->rqst_data
.h_ct
.preamble_word2
>> 16),
498 fcport
->loop_id
, fcport
->d_id
.b
.domain
, fcport
->d_id
.b
.area
,
499 fcport
->d_id
.b
.al_pa
);
501 rval
= qla2x00_start_sp(sp
);
502 if (rval
!= QLA_SUCCESS
) {
503 ql_log(ql_log_warn
, vha
, 0x7017,
504 "qla2x00_start_sp failed=%d.\n", rval
);
506 mempool_free(sp
, ha
->srb_mempool
);
508 goto done_free_fcport
;
515 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
516 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
517 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
518 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
523 /* Set the port configuration to enable the
524 * internal loopback on ISP81XX
527 qla81xx_set_internal_loopback(scsi_qla_host_t
*vha
, uint16_t *config
,
528 uint16_t *new_config
)
532 struct qla_hw_data
*ha
= vha
->hw
;
535 goto done_set_internal
;
537 new_config
[0] = config
[0] | (ENABLE_INTERNAL_LOOPBACK
<< 1);
538 memcpy(&new_config
[1], &config
[1], sizeof(uint16_t) * 3) ;
540 ha
->notify_dcbx_comp
= 1;
541 ret
= qla81xx_set_port_config(vha
, new_config
);
542 if (ret
!= QLA_SUCCESS
) {
543 ql_log(ql_log_warn
, vha
, 0x7021,
544 "set port config failed.\n");
545 ha
->notify_dcbx_comp
= 0;
547 goto done_set_internal
;
550 /* Wait for DCBX complete event */
551 if (!wait_for_completion_timeout(&ha
->dcbx_comp
, (20 * HZ
))) {
552 ql_dbg(ql_dbg_user
, vha
, 0x7022,
553 "State change notification not received.\n");
555 ql_dbg(ql_dbg_user
, vha
, 0x7023,
556 "State change received.\n");
558 ha
->notify_dcbx_comp
= 0;
564 /* Set the port configuration to disable the
565 * internal loopback on ISP81XX
568 qla81xx_reset_internal_loopback(scsi_qla_host_t
*vha
, uint16_t *config
,
573 uint16_t new_config
[4];
574 struct qla_hw_data
*ha
= vha
->hw
;
577 goto done_reset_internal
;
579 memset(new_config
, 0 , sizeof(new_config
));
580 if ((config
[0] & INTERNAL_LOOPBACK_MASK
) >> 1 ==
581 ENABLE_INTERNAL_LOOPBACK
) {
582 new_config
[0] = config
[0] & ~INTERNAL_LOOPBACK_MASK
;
583 memcpy(&new_config
[1], &config
[1], sizeof(uint16_t) * 3) ;
585 ha
->notify_dcbx_comp
= wait
;
586 ret
= qla81xx_set_port_config(vha
, new_config
);
587 if (ret
!= QLA_SUCCESS
) {
588 ql_log(ql_log_warn
, vha
, 0x7025,
589 "Set port config failed.\n");
590 ha
->notify_dcbx_comp
= 0;
592 goto done_reset_internal
;
595 /* Wait for DCBX complete event */
596 if (wait
&& !wait_for_completion_timeout(&ha
->dcbx_comp
,
598 ql_dbg(ql_dbg_user
, vha
, 0x7026,
599 "State change notification not received.\n");
600 ha
->notify_dcbx_comp
= 0;
602 goto done_reset_internal
;
604 ql_dbg(ql_dbg_user
, vha
, 0x7027,
605 "State change received.\n");
607 ha
->notify_dcbx_comp
= 0;
614 qla2x00_process_loopback(struct fc_bsg_job
*bsg_job
)
616 struct Scsi_Host
*host
= bsg_job
->shost
;
617 scsi_qla_host_t
*vha
= shost_priv(host
);
618 struct qla_hw_data
*ha
= vha
->hw
;
620 uint8_t command_sent
;
622 struct msg_echo_lb elreq
;
623 uint16_t response
[MAILBOX_REGISTER_COUNT
];
624 uint16_t config
[4], new_config
[4];
626 uint8_t *req_data
= NULL
;
627 dma_addr_t req_data_dma
;
628 uint32_t req_data_len
;
629 uint8_t *rsp_data
= NULL
;
630 dma_addr_t rsp_data_dma
;
631 uint32_t rsp_data_len
;
633 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
634 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
635 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
636 ql_log(ql_log_warn
, vha
, 0x7018, "Abort active or needed.\n");
640 if (!vha
->flags
.online
) {
641 ql_log(ql_log_warn
, vha
, 0x7019, "Host is not online.\n");
645 elreq
.req_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
646 bsg_job
->request_payload
.sg_list
, bsg_job
->request_payload
.sg_cnt
,
649 if (!elreq
.req_sg_cnt
) {
650 ql_log(ql_log_warn
, vha
, 0x701a,
651 "dma_map_sg returned %d for request.\n", elreq
.req_sg_cnt
);
655 elreq
.rsp_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
656 bsg_job
->reply_payload
.sg_list
, bsg_job
->reply_payload
.sg_cnt
,
659 if (!elreq
.rsp_sg_cnt
) {
660 ql_log(ql_log_warn
, vha
, 0x701b,
661 "dma_map_sg returned %d for reply.\n", elreq
.rsp_sg_cnt
);
663 goto done_unmap_req_sg
;
666 if ((elreq
.req_sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) ||
667 (elreq
.rsp_sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
)) {
668 ql_log(ql_log_warn
, vha
, 0x701c,
669 "dma mapping resulted in different sg counts, "
670 "request_sg_cnt: %x dma_request_sg_cnt: %x "
671 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
672 bsg_job
->request_payload
.sg_cnt
, elreq
.req_sg_cnt
,
673 bsg_job
->reply_payload
.sg_cnt
, elreq
.rsp_sg_cnt
);
677 req_data_len
= rsp_data_len
= bsg_job
->request_payload
.payload_len
;
678 req_data
= dma_alloc_coherent(&ha
->pdev
->dev
, req_data_len
,
679 &req_data_dma
, GFP_KERNEL
);
681 ql_log(ql_log_warn
, vha
, 0x701d,
682 "dma alloc failed for req_data.\n");
687 rsp_data
= dma_alloc_coherent(&ha
->pdev
->dev
, rsp_data_len
,
688 &rsp_data_dma
, GFP_KERNEL
);
690 ql_log(ql_log_warn
, vha
, 0x7004,
691 "dma alloc failed for rsp_data.\n");
693 goto done_free_dma_req
;
696 /* Copy the request buffer in req_data now */
697 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
698 bsg_job
->request_payload
.sg_cnt
, req_data
, req_data_len
);
700 elreq
.send_dma
= req_data_dma
;
701 elreq
.rcv_dma
= rsp_data_dma
;
702 elreq
.transfer_size
= req_data_len
;
704 elreq
.options
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
706 if ((ha
->current_topology
== ISP_CFG_F
||
708 le32_to_cpu(*(uint32_t *)req_data
) == ELS_OPCODE_BYTE
709 && req_data_len
== MAX_ELS_FRAME_PAYLOAD
)) &&
710 elreq
.options
== EXTERNAL_LOOPBACK
) {
711 type
= "FC_BSG_HST_VENDOR_ECHO_DIAG";
712 ql_dbg(ql_dbg_user
, vha
, 0x701e,
713 "BSG request type: %s.\n", type
);
714 command_sent
= INT_DEF_LB_ECHO_CMD
;
715 rval
= qla2x00_echo_test(vha
, &elreq
, response
);
717 if (IS_QLA81XX(ha
)) {
718 memset(config
, 0, sizeof(config
));
719 memset(new_config
, 0, sizeof(new_config
));
720 if (qla81xx_get_port_config(vha
, config
)) {
721 ql_log(ql_log_warn
, vha
, 0x701f,
722 "Get port config failed.\n");
723 bsg_job
->reply
->reply_payload_rcv_len
= 0;
724 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
726 goto done_free_dma_req
;
729 if (elreq
.options
!= EXTERNAL_LOOPBACK
) {
730 ql_dbg(ql_dbg_user
, vha
, 0x7020,
731 "Internal: curent port config = %x\n",
733 if (qla81xx_set_internal_loopback(vha
, config
,
735 ql_log(ql_log_warn
, vha
, 0x7024,
736 "Internal loopback failed.\n");
737 bsg_job
->reply
->reply_payload_rcv_len
=
739 bsg_job
->reply
->result
=
742 goto done_free_dma_req
;
745 /* For external loopback to work
746 * ensure internal loopback is disabled
748 if (qla81xx_reset_internal_loopback(vha
,
750 bsg_job
->reply
->reply_payload_rcv_len
=
752 bsg_job
->reply
->result
=
755 goto done_free_dma_req
;
759 type
= "FC_BSG_HST_VENDOR_LOOPBACK";
760 ql_dbg(ql_dbg_user
, vha
, 0x7028,
761 "BSG request type: %s.\n", type
);
763 command_sent
= INT_DEF_LB_LOOPBACK_CMD
;
764 rval
= qla2x00_loopback_test(vha
, &elreq
, response
);
767 /* Revert back to original port config
768 * Also clear internal loopback
770 qla81xx_reset_internal_loopback(vha
,
774 if (response
[0] == MBS_COMMAND_ERROR
&&
775 response
[1] == MBS_LB_RESET
) {
776 ql_log(ql_log_warn
, vha
, 0x7029,
777 "MBX command error, Aborting ISP.\n");
778 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
779 qla2xxx_wake_dpc(vha
);
780 qla2x00_wait_for_chip_reset(vha
);
781 /* Also reset the MPI */
782 if (qla81xx_restart_mpi_firmware(vha
) !=
784 ql_log(ql_log_warn
, vha
, 0x702a,
785 "MPI reset failed.\n");
788 bsg_job
->reply
->reply_payload_rcv_len
= 0;
789 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
791 goto done_free_dma_req
;
794 type
= "FC_BSG_HST_VENDOR_LOOPBACK";
795 ql_dbg(ql_dbg_user
, vha
, 0x702b,
796 "BSG request type: %s.\n", type
);
797 command_sent
= INT_DEF_LB_LOOPBACK_CMD
;
798 rval
= qla2x00_loopback_test(vha
, &elreq
, response
);
803 ql_log(ql_log_warn
, vha
, 0x702c,
804 "Vendor request %s failed.\n", type
);
806 fw_sts_ptr
= ((uint8_t *)bsg_job
->req
->sense
) +
807 sizeof(struct fc_bsg_reply
);
809 memcpy(fw_sts_ptr
, response
, sizeof(response
));
810 fw_sts_ptr
+= sizeof(response
);
811 *fw_sts_ptr
= command_sent
;
813 bsg_job
->reply
->reply_payload_rcv_len
= 0;
814 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
816 ql_dbg(ql_dbg_user
, vha
, 0x702d,
817 "Vendor request %s completed.\n", type
);
819 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
) +
820 sizeof(response
) + sizeof(uint8_t);
821 bsg_job
->reply
->reply_payload_rcv_len
=
822 bsg_job
->reply_payload
.payload_len
;
823 fw_sts_ptr
= ((uint8_t *)bsg_job
->req
->sense
) +
824 sizeof(struct fc_bsg_reply
);
825 memcpy(fw_sts_ptr
, response
, sizeof(response
));
826 fw_sts_ptr
+= sizeof(response
);
827 *fw_sts_ptr
= command_sent
;
828 bsg_job
->reply
->result
= DID_OK
;
829 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
830 bsg_job
->reply_payload
.sg_cnt
, rsp_data
,
833 bsg_job
->job_done(bsg_job
);
835 dma_free_coherent(&ha
->pdev
->dev
, rsp_data_len
,
836 rsp_data
, rsp_data_dma
);
838 dma_free_coherent(&ha
->pdev
->dev
, req_data_len
,
839 req_data
, req_data_dma
);
841 dma_unmap_sg(&ha
->pdev
->dev
,
842 bsg_job
->reply_payload
.sg_list
,
843 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
845 dma_unmap_sg(&ha
->pdev
->dev
,
846 bsg_job
->request_payload
.sg_list
,
847 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
852 qla84xx_reset(struct fc_bsg_job
*bsg_job
)
854 struct Scsi_Host
*host
= bsg_job
->shost
;
855 scsi_qla_host_t
*vha
= shost_priv(host
);
856 struct qla_hw_data
*ha
= vha
->hw
;
860 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
861 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
862 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
863 ql_log(ql_log_warn
, vha
, 0x702e, "Abort active or needed.\n");
867 if (!IS_QLA84XX(ha
)) {
868 ql_dbg(ql_dbg_user
, vha
, 0x702f, "Not 84xx, exiting.\n");
872 flag
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
874 rval
= qla84xx_reset_chip(vha
, flag
== A84_ISSUE_RESET_DIAG_FW
);
877 ql_log(ql_log_warn
, vha
, 0x7030,
878 "Vendor request 84xx reset failed.\n");
879 rval
= bsg_job
->reply
->reply_payload_rcv_len
= 0;
880 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
883 ql_dbg(ql_dbg_user
, vha
, 0x7031,
884 "Vendor request 84xx reset completed.\n");
885 bsg_job
->reply
->result
= DID_OK
;
888 bsg_job
->job_done(bsg_job
);
893 qla84xx_updatefw(struct fc_bsg_job
*bsg_job
)
895 struct Scsi_Host
*host
= bsg_job
->shost
;
896 scsi_qla_host_t
*vha
= shost_priv(host
);
897 struct qla_hw_data
*ha
= vha
->hw
;
898 struct verify_chip_entry_84xx
*mn
= NULL
;
899 dma_addr_t mn_dma
, fw_dma
;
908 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
909 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
910 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
))
913 if (!IS_QLA84XX(ha
)) {
914 ql_dbg(ql_dbg_user
, vha
, 0x7032,
915 "Not 84xx, exiting.\n");
919 sg_cnt
= dma_map_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
920 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
922 ql_log(ql_log_warn
, vha
, 0x7033,
923 "dma_map_sg returned %d for request.\n", sg_cnt
);
927 if (sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) {
928 ql_log(ql_log_warn
, vha
, 0x7034,
929 "DMA mapping resulted in different sg counts, "
930 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
931 bsg_job
->request_payload
.sg_cnt
, sg_cnt
);
936 data_len
= bsg_job
->request_payload
.payload_len
;
937 fw_buf
= dma_alloc_coherent(&ha
->pdev
->dev
, data_len
,
938 &fw_dma
, GFP_KERNEL
);
940 ql_log(ql_log_warn
, vha
, 0x7035,
941 "DMA alloc failed for fw_buf.\n");
946 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
947 bsg_job
->request_payload
.sg_cnt
, fw_buf
, data_len
);
949 mn
= dma_pool_alloc(ha
->s_dma_pool
, GFP_KERNEL
, &mn_dma
);
951 ql_log(ql_log_warn
, vha
, 0x7036,
952 "DMA alloc failed for fw buffer.\n");
954 goto done_free_fw_buf
;
957 flag
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
958 fw_ver
= le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf
+ 2)));
960 memset(mn
, 0, sizeof(struct access_chip_84xx
));
961 mn
->entry_type
= VERIFY_CHIP_IOCB_TYPE
;
964 options
= VCO_FORCE_UPDATE
| VCO_END_OF_DATA
;
965 if (flag
== A84_ISSUE_UPDATE_DIAGFW_CMD
)
966 options
|= VCO_DIAG_FW
;
968 mn
->options
= cpu_to_le16(options
);
969 mn
->fw_ver
= cpu_to_le32(fw_ver
);
970 mn
->fw_size
= cpu_to_le32(data_len
);
971 mn
->fw_seq_size
= cpu_to_le32(data_len
);
972 mn
->dseg_address
[0] = cpu_to_le32(LSD(fw_dma
));
973 mn
->dseg_address
[1] = cpu_to_le32(MSD(fw_dma
));
974 mn
->dseg_length
= cpu_to_le32(data_len
);
975 mn
->data_seg_cnt
= cpu_to_le16(1);
977 rval
= qla2x00_issue_iocb_timeout(vha
, mn
, mn_dma
, 0, 120);
980 ql_log(ql_log_warn
, vha
, 0x7037,
981 "Vendor request 84xx updatefw failed.\n");
983 rval
= bsg_job
->reply
->reply_payload_rcv_len
= 0;
984 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
987 ql_dbg(ql_dbg_user
, vha
, 0x7038,
988 "Vendor request 84xx updatefw completed.\n");
990 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
991 bsg_job
->reply
->result
= DID_OK
;
994 bsg_job
->job_done(bsg_job
);
995 dma_pool_free(ha
->s_dma_pool
, mn
, mn_dma
);
998 dma_free_coherent(&ha
->pdev
->dev
, data_len
, fw_buf
, fw_dma
);
1001 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
1002 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1008 qla84xx_mgmt_cmd(struct fc_bsg_job
*bsg_job
)
1010 struct Scsi_Host
*host
= bsg_job
->shost
;
1011 scsi_qla_host_t
*vha
= shost_priv(host
);
1012 struct qla_hw_data
*ha
= vha
->hw
;
1013 struct access_chip_84xx
*mn
= NULL
;
1014 dma_addr_t mn_dma
, mgmt_dma
;
1015 void *mgmt_b
= NULL
;
1017 struct qla_bsg_a84_mgmt
*ql84_mgmt
;
1019 uint32_t data_len
= 0;
1020 uint32_t dma_direction
= DMA_NONE
;
1022 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
1023 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
1024 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
1025 ql_log(ql_log_warn
, vha
, 0x7039,
1026 "Abort active or needed.\n");
1030 if (!IS_QLA84XX(ha
)) {
1031 ql_log(ql_log_warn
, vha
, 0x703a,
1032 "Not 84xx, exiting.\n");
1036 ql84_mgmt
= (struct qla_bsg_a84_mgmt
*)((char *)bsg_job
->request
+
1037 sizeof(struct fc_bsg_request
));
1039 ql_log(ql_log_warn
, vha
, 0x703b,
1040 "MGMT header not provided, exiting.\n");
1044 mn
= dma_pool_alloc(ha
->s_dma_pool
, GFP_KERNEL
, &mn_dma
);
1046 ql_log(ql_log_warn
, vha
, 0x703c,
1047 "DMA alloc failed for fw buffer.\n");
1051 memset(mn
, 0, sizeof(struct access_chip_84xx
));
1052 mn
->entry_type
= ACCESS_CHIP_IOCB_TYPE
;
1053 mn
->entry_count
= 1;
1055 switch (ql84_mgmt
->mgmt
.cmd
) {
1056 case QLA84_MGMT_READ_MEM
:
1057 case QLA84_MGMT_GET_INFO
:
1058 sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
1059 bsg_job
->reply_payload
.sg_list
,
1060 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
1062 ql_log(ql_log_warn
, vha
, 0x703d,
1063 "dma_map_sg returned %d for reply.\n", sg_cnt
);
1068 dma_direction
= DMA_FROM_DEVICE
;
1070 if (sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
) {
1071 ql_log(ql_log_warn
, vha
, 0x703e,
1072 "DMA mapping resulted in different sg counts, "
1073 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1074 bsg_job
->reply_payload
.sg_cnt
, sg_cnt
);
1079 data_len
= bsg_job
->reply_payload
.payload_len
;
1081 mgmt_b
= dma_alloc_coherent(&ha
->pdev
->dev
, data_len
,
1082 &mgmt_dma
, GFP_KERNEL
);
1084 ql_log(ql_log_warn
, vha
, 0x703f,
1085 "DMA alloc failed for mgmt_b.\n");
1090 if (ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_READ_MEM
) {
1091 mn
->options
= cpu_to_le16(ACO_DUMP_MEMORY
);
1094 ql84_mgmt
->mgmt
.mgmtp
.u
.mem
.start_addr
);
1096 } else if (ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_GET_INFO
) {
1097 mn
->options
= cpu_to_le16(ACO_REQUEST_INFO
);
1099 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.info
.type
);
1103 ql84_mgmt
->mgmt
.mgmtp
.u
.info
.context
);
1107 case QLA84_MGMT_WRITE_MEM
:
1108 sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
1109 bsg_job
->request_payload
.sg_list
,
1110 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1113 ql_log(ql_log_warn
, vha
, 0x7040,
1114 "dma_map_sg returned %d.\n", sg_cnt
);
1119 dma_direction
= DMA_TO_DEVICE
;
1121 if (sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) {
1122 ql_log(ql_log_warn
, vha
, 0x7041,
1123 "DMA mapping resulted in different sg counts, "
1124 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1125 bsg_job
->request_payload
.sg_cnt
, sg_cnt
);
1130 data_len
= bsg_job
->request_payload
.payload_len
;
1131 mgmt_b
= dma_alloc_coherent(&ha
->pdev
->dev
, data_len
,
1132 &mgmt_dma
, GFP_KERNEL
);
1134 ql_log(ql_log_warn
, vha
, 0x7042,
1135 "DMA alloc failed for mgmt_b.\n");
1140 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1141 bsg_job
->request_payload
.sg_cnt
, mgmt_b
, data_len
);
1143 mn
->options
= cpu_to_le16(ACO_LOAD_MEMORY
);
1145 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.mem
.start_addr
);
1148 case QLA84_MGMT_CHNG_CONFIG
:
1149 mn
->options
= cpu_to_le16(ACO_CHANGE_CONFIG_PARAM
);
1151 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.config
.id
);
1154 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.config
.param0
);
1157 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.config
.param1
);
1165 if (ql84_mgmt
->mgmt
.cmd
!= QLA84_MGMT_CHNG_CONFIG
) {
1166 mn
->total_byte_cnt
= cpu_to_le32(ql84_mgmt
->mgmt
.len
);
1167 mn
->dseg_count
= cpu_to_le16(1);
1168 mn
->dseg_address
[0] = cpu_to_le32(LSD(mgmt_dma
));
1169 mn
->dseg_address
[1] = cpu_to_le32(MSD(mgmt_dma
));
1170 mn
->dseg_length
= cpu_to_le32(ql84_mgmt
->mgmt
.len
);
1173 rval
= qla2x00_issue_iocb(vha
, mn
, mn_dma
, 0);
1176 ql_log(ql_log_warn
, vha
, 0x7043,
1177 "Vendor request 84xx mgmt failed.\n");
1179 rval
= bsg_job
->reply
->reply_payload_rcv_len
= 0;
1180 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
1183 ql_dbg(ql_dbg_user
, vha
, 0x7044,
1184 "Vendor request 84xx mgmt completed.\n");
1186 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1187 bsg_job
->reply
->result
= DID_OK
;
1189 if ((ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_READ_MEM
) ||
1190 (ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_GET_INFO
)) {
1191 bsg_job
->reply
->reply_payload_rcv_len
=
1192 bsg_job
->reply_payload
.payload_len
;
1194 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
1195 bsg_job
->reply_payload
.sg_cnt
, mgmt_b
,
1200 bsg_job
->job_done(bsg_job
);
1204 dma_free_coherent(&ha
->pdev
->dev
, data_len
, mgmt_b
, mgmt_dma
);
1206 if (dma_direction
== DMA_TO_DEVICE
)
1207 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
1208 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1209 else if (dma_direction
== DMA_FROM_DEVICE
)
1210 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
1211 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
1214 dma_pool_free(ha
->s_dma_pool
, mn
, mn_dma
);
1220 qla24xx_iidma(struct fc_bsg_job
*bsg_job
)
1222 struct Scsi_Host
*host
= bsg_job
->shost
;
1223 scsi_qla_host_t
*vha
= shost_priv(host
);
1225 struct qla_port_param
*port_param
= NULL
;
1226 fc_port_t
*fcport
= NULL
;
1227 uint16_t mb
[MAILBOX_REGISTER_COUNT
];
1228 uint8_t *rsp_ptr
= NULL
;
1230 bsg_job
->reply
->reply_payload_rcv_len
= 0;
1232 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
1233 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
1234 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
1235 ql_log(ql_log_warn
, vha
, 0x7045, "abort active or needed.\n");
1239 if (!IS_IIDMA_CAPABLE(vha
->hw
)) {
1240 ql_log(ql_log_info
, vha
, 0x7046, "iiDMA not supported.\n");
1244 port_param
= (struct qla_port_param
*)((char *)bsg_job
->request
+
1245 sizeof(struct fc_bsg_request
));
1247 ql_log(ql_log_warn
, vha
, 0x7047,
1248 "port_param header not provided.\n");
1252 if (port_param
->fc_scsi_addr
.dest_type
!= EXT_DEF_TYPE_WWPN
) {
1253 ql_log(ql_log_warn
, vha
, 0x7048,
1254 "Invalid destination type.\n");
1258 list_for_each_entry(fcport
, &vha
->vp_fcports
, list
) {
1259 if (fcport
->port_type
!= FCT_TARGET
)
1262 if (memcmp(port_param
->fc_scsi_addr
.dest_addr
.wwpn
,
1263 fcport
->port_name
, sizeof(fcport
->port_name
)))
1269 ql_log(ql_log_warn
, vha
, 0x7049,
1270 "Failed to find port.\n");
1274 if (atomic_read(&fcport
->state
) != FCS_ONLINE
) {
1275 ql_log(ql_log_warn
, vha
, 0x704a,
1276 "Port is not online.\n");
1280 if (fcport
->flags
& FCF_LOGIN_NEEDED
) {
1281 ql_log(ql_log_warn
, vha
, 0x704b,
1282 "Remote port not logged in flags = 0x%x.\n", fcport
->flags
);
1286 if (port_param
->mode
)
1287 rval
= qla2x00_set_idma_speed(vha
, fcport
->loop_id
,
1288 port_param
->speed
, mb
);
1290 rval
= qla2x00_get_idma_speed(vha
, fcport
->loop_id
,
1291 &port_param
->speed
, mb
);
1294 ql_log(ql_log_warn
, vha
, 0x704c,
1295 "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1296 "%04x %x %04x %04x.\n", fcport
->port_name
[0],
1297 fcport
->port_name
[1], fcport
->port_name
[2],
1298 fcport
->port_name
[3], fcport
->port_name
[4],
1299 fcport
->port_name
[5], fcport
->port_name
[6],
1300 fcport
->port_name
[7], rval
, fcport
->fp_speed
, mb
[0], mb
[1]);
1302 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
1305 if (!port_param
->mode
) {
1306 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
) +
1307 sizeof(struct qla_port_param
);
1309 rsp_ptr
= ((uint8_t *)bsg_job
->reply
) +
1310 sizeof(struct fc_bsg_reply
);
1312 memcpy(rsp_ptr
, port_param
,
1313 sizeof(struct qla_port_param
));
1316 bsg_job
->reply
->result
= DID_OK
;
1319 bsg_job
->job_done(bsg_job
);
/*
 * qla2x00_optrom_setup() - Validate an option-ROM read/update BSG request
 * and stage the driver's optrom state for the transfer.
 * @bsg_job:   the bsg request; vendor_cmd[1] carries the flash start offset.
 * @vha:       host the request is addressed to.
 * @is_update: non-zero when preparing a flash write, zero for a read.
 *
 * On success ha->optrom_region_{start,size} describe the (clamped) region,
 * ha->optrom_buffer holds a zeroed vmalloc'd staging buffer, and
 * ha->optrom_state is QLA_SWRITING or QLA_SREADING.  On failure a negative
 * errno is returned and optrom_state is left at QLA_SWAITING.
 */
static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	bsg_job->reply->reply_payload_rcv_len = 0;

	/* No flash access while the PCI channel is offline. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	/* Flash start offset supplied by the application. */
	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	/*
	 * NOTE(review): start == ha->optrom_size passes this check and yields
	 * a zero-length region below -- confirm whether ">=" was intended.
	 */
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	/* Only one optrom read/update may be staged at a time. */
	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		/*
		 * Writes are restricted to known-safe regions: offset 0 on
		 * 2300-sized flash, the FLT boot/firmware regions (FLT
		 * addresses are in 32-bit words, hence * 4), or anywhere on
		 * ISP24xx/25xx/8xxx parts.
		 */
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_QLA8XXX_TYPE(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		/* Clamp the region so it never runs past the flash end. */
		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		/* Read path: size comes from the reply payload instead. */
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		/* Allocation failed: release the staged state. */
		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
	return 0;
}
1395 qla2x00_read_optrom(struct fc_bsg_job
*bsg_job
)
1397 struct Scsi_Host
*host
= bsg_job
->shost
;
1398 scsi_qla_host_t
*vha
= shost_priv(host
);
1399 struct qla_hw_data
*ha
= vha
->hw
;
1402 rval
= qla2x00_optrom_setup(bsg_job
, vha
, 0);
1406 ha
->isp_ops
->read_optrom(vha
, ha
->optrom_buffer
,
1407 ha
->optrom_region_start
, ha
->optrom_region_size
);
1409 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
1410 bsg_job
->reply_payload
.sg_cnt
, ha
->optrom_buffer
,
1411 ha
->optrom_region_size
);
1413 bsg_job
->reply
->reply_payload_rcv_len
= ha
->optrom_region_size
;
1414 bsg_job
->reply
->result
= DID_OK
;
1415 vfree(ha
->optrom_buffer
);
1416 ha
->optrom_buffer
= NULL
;
1417 ha
->optrom_state
= QLA_SWAITING
;
1418 bsg_job
->job_done(bsg_job
);
1423 qla2x00_update_optrom(struct fc_bsg_job
*bsg_job
)
1425 struct Scsi_Host
*host
= bsg_job
->shost
;
1426 scsi_qla_host_t
*vha
= shost_priv(host
);
1427 struct qla_hw_data
*ha
= vha
->hw
;
1430 rval
= qla2x00_optrom_setup(bsg_job
, vha
, 1);
1434 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1435 bsg_job
->request_payload
.sg_cnt
, ha
->optrom_buffer
,
1436 ha
->optrom_region_size
);
1438 ha
->isp_ops
->write_optrom(vha
, ha
->optrom_buffer
,
1439 ha
->optrom_region_start
, ha
->optrom_region_size
);
1441 bsg_job
->reply
->result
= DID_OK
;
1442 vfree(ha
->optrom_buffer
);
1443 ha
->optrom_buffer
= NULL
;
1444 ha
->optrom_state
= QLA_SWAITING
;
1445 bsg_job
->job_done(bsg_job
);
1450 qla2x00_process_vendor_specific(struct fc_bsg_job
*bsg_job
)
1452 switch (bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[0]) {
1453 case QL_VND_LOOPBACK
:
1454 return qla2x00_process_loopback(bsg_job
);
1456 case QL_VND_A84_RESET
:
1457 return qla84xx_reset(bsg_job
);
1459 case QL_VND_A84_UPDATE_FW
:
1460 return qla84xx_updatefw(bsg_job
);
1462 case QL_VND_A84_MGMT_CMD
:
1463 return qla84xx_mgmt_cmd(bsg_job
);
1466 return qla24xx_iidma(bsg_job
);
1468 case QL_VND_FCP_PRIO_CFG_CMD
:
1469 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job
);
1471 case QL_VND_READ_FLASH
:
1472 return qla2x00_read_optrom(bsg_job
);
1474 case QL_VND_UPDATE_FLASH
:
1475 return qla2x00_update_optrom(bsg_job
);
1478 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
1479 bsg_job
->job_done(bsg_job
);
1485 qla24xx_bsg_request(struct fc_bsg_job
*bsg_job
)
1488 struct fc_rport
*rport
;
1489 fc_port_t
*fcport
= NULL
;
1490 struct Scsi_Host
*host
;
1491 scsi_qla_host_t
*vha
;
1493 if (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
) {
1494 rport
= bsg_job
->rport
;
1495 fcport
= *(fc_port_t
**) rport
->dd_data
;
1496 host
= rport_to_shost(rport
);
1497 vha
= shost_priv(host
);
1499 host
= bsg_job
->shost
;
1500 vha
= shost_priv(host
);
1503 ql_dbg(ql_dbg_user
, vha
, 0x7000,
1504 "Entered %s msgcode=%d.\n", __func__
, bsg_job
->request
->msgcode
);
1506 switch (bsg_job
->request
->msgcode
) {
1507 case FC_BSG_RPT_ELS
:
1508 case FC_BSG_HST_ELS_NOLOGIN
:
1509 ret
= qla2x00_process_els(bsg_job
);
1512 ret
= qla2x00_process_ct(bsg_job
);
1514 case FC_BSG_HST_VENDOR
:
1515 ret
= qla2x00_process_vendor_specific(bsg_job
);
1517 case FC_BSG_HST_ADD_RPORT
:
1518 case FC_BSG_HST_DEL_RPORT
:
1521 ql_log(ql_log_warn
, vha
, 0x705a, "Unsupported BSG request.\n");
1528 qla24xx_bsg_timeout(struct fc_bsg_job
*bsg_job
)
1530 scsi_qla_host_t
*vha
= shost_priv(bsg_job
->shost
);
1531 struct qla_hw_data
*ha
= vha
->hw
;
1534 unsigned long flags
;
1535 struct req_que
*req
;
1536 struct srb_ctx
*sp_bsg
;
1538 /* find the bsg job from the active list of commands */
1539 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1540 for (que
= 0; que
< ha
->max_req_queues
; que
++) {
1541 req
= ha
->req_q_map
[que
];
1545 for (cnt
= 1; cnt
< MAX_OUTSTANDING_COMMANDS
; cnt
++) {
1546 sp
= req
->outstanding_cmds
[cnt
];
1550 if (((sp_bsg
->type
== SRB_CT_CMD
) ||
1551 (sp_bsg
->type
== SRB_ELS_CMD_HST
))
1552 && (sp_bsg
->u
.bsg_job
== bsg_job
)) {
1553 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1554 if (ha
->isp_ops
->abort_command(sp
)) {
1555 ql_log(ql_log_warn
, vha
, 0x7089,
1556 "mbx abort_command "
1558 bsg_job
->req
->errors
=
1559 bsg_job
->reply
->result
= -EIO
;
1561 ql_dbg(ql_dbg_user
, vha
, 0x708a,
1562 "mbx abort_command "
1564 bsg_job
->req
->errors
=
1565 bsg_job
->reply
->result
= 0;
1567 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1573 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1574 ql_log(ql_log_info
, vha
, 0x708b, "SRB not found to abort.\n");
1575 bsg_job
->req
->errors
= bsg_job
->reply
->result
= -ENXIO
;
1579 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1580 if (bsg_job
->request
->msgcode
== FC_BSG_HST_CT
)
1583 mempool_free(sp
, ha
->srb_mempool
);