/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
13 /* BSG support for ELS/CT pass through */
15 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t
*vha
, fc_port_t
*fcport
, size_t size
)
18 struct qla_hw_data
*ha
= vha
->hw
;
21 sp
= mempool_alloc(ha
->srb_mempool
, GFP_KERNEL
);
24 ctx
= kzalloc(size
, GFP_KERNEL
);
26 mempool_free(sp
, ha
->srb_mempool
);
31 memset(sp
, 0, sizeof(*sp
));
39 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t
*vha
,
40 struct qla_fcp_prio_cfg
*pri_cfg
, uint8_t flag
)
42 int i
, ret
, num_valid
;
44 struct qla_fcp_prio_entry
*pri_entry
;
45 uint32_t *bcode_val_ptr
, bcode_val
;
49 bcode
= (uint8_t *)pri_cfg
;
50 bcode_val_ptr
= (uint32_t *)pri_cfg
;
51 bcode_val
= (uint32_t)(*bcode_val_ptr
);
53 if (bcode_val
== 0xFFFFFFFF) {
54 /* No FCP Priority config data in flash */
55 ql_dbg(ql_dbg_user
, vha
, 0x7051,
56 "No FCP Priority config data.\n");
60 if (bcode
[0] != 'H' || bcode
[1] != 'Q' || bcode
[2] != 'O' ||
62 /* Invalid FCP priority data header*/
63 ql_dbg(ql_dbg_user
, vha
, 0x7052,
64 "Invalid FCP Priority data header. bcode=0x%x.\n",
71 pri_entry
= &pri_cfg
->entry
[0];
72 for (i
= 0; i
< pri_cfg
->num_entries
; i
++) {
73 if (pri_entry
->flags
& FCP_PRIO_ENTRY_TAG_VALID
)
79 /* No valid FCP priority data entries */
80 ql_dbg(ql_dbg_user
, vha
, 0x7053,
81 "No valid FCP Priority data entries.\n");
84 /* FCP priority data is valid */
85 ql_dbg(ql_dbg_user
, vha
, 0x7054,
86 "Valid FCP priority data. num entries = %d.\n",
94 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job
*bsg_job
)
96 struct Scsi_Host
*host
= bsg_job
->shost
;
97 scsi_qla_host_t
*vha
= shost_priv(host
);
98 struct qla_hw_data
*ha
= vha
->hw
;
103 bsg_job
->reply
->reply_payload_rcv_len
= 0;
105 if (!(IS_QLA24XX_TYPE(ha
) || IS_QLA25XX(ha
))) {
107 goto exit_fcp_prio_cfg
;
110 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
111 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
112 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
114 goto exit_fcp_prio_cfg
;
117 /* Get the sub command */
118 oper
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
120 /* Only set config is allowed if config memory is not allocated */
121 if (!ha
->fcp_prio_cfg
&& (oper
!= QLFC_FCP_PRIO_SET_CONFIG
)) {
123 goto exit_fcp_prio_cfg
;
126 case QLFC_FCP_PRIO_DISABLE
:
127 if (ha
->flags
.fcp_prio_enabled
) {
128 ha
->flags
.fcp_prio_enabled
= 0;
129 ha
->fcp_prio_cfg
->attributes
&=
130 ~FCP_PRIO_ATTR_ENABLE
;
131 qla24xx_update_all_fcp_prio(vha
);
132 bsg_job
->reply
->result
= DID_OK
;
135 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
136 goto exit_fcp_prio_cfg
;
140 case QLFC_FCP_PRIO_ENABLE
:
141 if (!ha
->flags
.fcp_prio_enabled
) {
142 if (ha
->fcp_prio_cfg
) {
143 ha
->flags
.fcp_prio_enabled
= 1;
144 ha
->fcp_prio_cfg
->attributes
|=
145 FCP_PRIO_ATTR_ENABLE
;
146 qla24xx_update_all_fcp_prio(vha
);
147 bsg_job
->reply
->result
= DID_OK
;
150 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
151 goto exit_fcp_prio_cfg
;
156 case QLFC_FCP_PRIO_GET_CONFIG
:
157 len
= bsg_job
->reply_payload
.payload_len
;
158 if (!len
|| len
> FCP_PRIO_CFG_SIZE
) {
160 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
161 goto exit_fcp_prio_cfg
;
164 bsg_job
->reply
->result
= DID_OK
;
165 bsg_job
->reply
->reply_payload_rcv_len
=
167 bsg_job
->reply_payload
.sg_list
,
168 bsg_job
->reply_payload
.sg_cnt
, ha
->fcp_prio_cfg
,
173 case QLFC_FCP_PRIO_SET_CONFIG
:
174 len
= bsg_job
->request_payload
.payload_len
;
175 if (!len
|| len
> FCP_PRIO_CFG_SIZE
) {
176 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
178 goto exit_fcp_prio_cfg
;
181 if (!ha
->fcp_prio_cfg
) {
182 ha
->fcp_prio_cfg
= vmalloc(FCP_PRIO_CFG_SIZE
);
183 if (!ha
->fcp_prio_cfg
) {
184 ql_log(ql_log_warn
, vha
, 0x7050,
185 "Unable to allocate memory for fcp prio "
186 "config data (%x).\n", FCP_PRIO_CFG_SIZE
);
187 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
189 goto exit_fcp_prio_cfg
;
193 memset(ha
->fcp_prio_cfg
, 0, FCP_PRIO_CFG_SIZE
);
194 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
195 bsg_job
->request_payload
.sg_cnt
, ha
->fcp_prio_cfg
,
198 /* validate fcp priority data */
200 if (!qla24xx_fcp_prio_cfg_valid(vha
,
201 (struct qla_fcp_prio_cfg
*) ha
->fcp_prio_cfg
, 1)) {
202 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
204 /* If buffer was invalidatic int
205 * fcp_prio_cfg is of no use
207 vfree(ha
->fcp_prio_cfg
);
208 ha
->fcp_prio_cfg
= NULL
;
209 goto exit_fcp_prio_cfg
;
212 ha
->flags
.fcp_prio_enabled
= 0;
213 if (ha
->fcp_prio_cfg
->attributes
& FCP_PRIO_ATTR_ENABLE
)
214 ha
->flags
.fcp_prio_enabled
= 1;
215 qla24xx_update_all_fcp_prio(vha
);
216 bsg_job
->reply
->result
= DID_OK
;
223 bsg_job
->job_done(bsg_job
);
227 qla2x00_process_els(struct fc_bsg_job
*bsg_job
)
229 struct fc_rport
*rport
;
230 fc_port_t
*fcport
= NULL
;
231 struct Scsi_Host
*host
;
232 scsi_qla_host_t
*vha
;
233 struct qla_hw_data
*ha
;
236 int req_sg_cnt
, rsp_sg_cnt
;
237 int rval
= (DRIVER_ERROR
<< 16);
238 uint16_t nextlid
= 0;
241 if (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
) {
242 rport
= bsg_job
->rport
;
243 fcport
= *(fc_port_t
**) rport
->dd_data
;
244 host
= rport_to_shost(rport
);
245 vha
= shost_priv(host
);
247 type
= "FC_BSG_RPT_ELS";
249 host
= bsg_job
->shost
;
250 vha
= shost_priv(host
);
252 type
= "FC_BSG_HST_ELS_NOLOGIN";
255 /* pass through is supported only for ISP 4Gb or higher */
256 if (!IS_FWI2_CAPABLE(ha
)) {
257 ql_dbg(ql_dbg_user
, vha
, 0x7001,
258 "ELS passthru not supported for ISP23xx based adapters.\n");
263 /* Multiple SG's are not supported for ELS requests */
264 if (bsg_job
->request_payload
.sg_cnt
> 1 ||
265 bsg_job
->reply_payload
.sg_cnt
> 1) {
266 ql_dbg(ql_dbg_user
, vha
, 0x7002,
267 "Multiple SG's are not suppored for ELS requests, "
268 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
269 bsg_job
->request_payload
.sg_cnt
,
270 bsg_job
->reply_payload
.sg_cnt
);
275 /* ELS request for rport */
276 if (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
) {
277 /* make sure the rport is logged in,
278 * if not perform fabric login
280 if (qla2x00_fabric_login(vha
, fcport
, &nextlid
)) {
281 ql_dbg(ql_dbg_user
, vha
, 0x7003,
282 "Failed to login port %06X for ELS passthru.\n",
288 /* Allocate a dummy fcport structure, since functions
289 * preparing the IOCB and mailbox command retrieves port
290 * specific information from fcport structure. For Host based
291 * ELS commands there will be no fcport structure allocated
293 fcport
= qla2x00_alloc_fcport(vha
, GFP_KERNEL
);
299 /* Initialize all required fields of fcport */
301 fcport
->vp_idx
= vha
->vp_idx
;
302 fcport
->d_id
.b
.al_pa
=
303 bsg_job
->request
->rqst_data
.h_els
.port_id
[0];
304 fcport
->d_id
.b
.area
=
305 bsg_job
->request
->rqst_data
.h_els
.port_id
[1];
306 fcport
->d_id
.b
.domain
=
307 bsg_job
->request
->rqst_data
.h_els
.port_id
[2];
309 (fcport
->d_id
.b
.al_pa
== 0xFD) ?
310 NPH_FABRIC_CONTROLLER
: NPH_F_PORT
;
313 if (!vha
->flags
.online
) {
314 ql_log(ql_log_warn
, vha
, 0x7005, "Host not online.\n");
320 dma_map_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
321 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
324 goto done_free_fcport
;
327 rsp_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
328 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
331 goto done_free_fcport
;
334 if ((req_sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) ||
335 (rsp_sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
)) {
336 ql_log(ql_log_warn
, vha
, 0x7008,
337 "dma mapping resulted in different sg counts, "
338 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
339 "dma_reply_sg_cnt:%x.\n", bsg_job
->request_payload
.sg_cnt
,
340 req_sg_cnt
, bsg_job
->reply_payload
.sg_cnt
, rsp_sg_cnt
);
345 /* Alloc SRB structure */
346 sp
= qla2x00_get_ctx_bsg_sp(vha
, fcport
, sizeof(struct srb_ctx
));
354 (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
?
355 SRB_ELS_CMD_RPT
: SRB_ELS_CMD_HST
);
357 (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
?
358 "bsg_els_rpt" : "bsg_els_hst");
359 els
->u
.bsg_job
= bsg_job
;
361 ql_dbg(ql_dbg_user
, vha
, 0x700a,
362 "bsg rqst type: %s els type: %x - loop-id=%x "
363 "portid=%-2x%02x%02x.\n", type
,
364 bsg_job
->request
->rqst_data
.h_els
.command_code
, fcport
->loop_id
,
365 fcport
->d_id
.b
.domain
, fcport
->d_id
.b
.area
, fcport
->d_id
.b
.al_pa
);
367 rval
= qla2x00_start_sp(sp
);
368 if (rval
!= QLA_SUCCESS
) {
369 ql_log(ql_log_warn
, vha
, 0x700e,
370 "qla2x00_start_sp failed = %d\n", rval
);
372 mempool_free(sp
, ha
->srb_mempool
);
379 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
380 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
381 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
382 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
383 goto done_free_fcport
;
386 if (bsg_job
->request
->msgcode
== FC_BSG_HST_ELS_NOLOGIN
)
393 qla2x00_process_ct(struct fc_bsg_job
*bsg_job
)
396 struct Scsi_Host
*host
= bsg_job
->shost
;
397 scsi_qla_host_t
*vha
= shost_priv(host
);
398 struct qla_hw_data
*ha
= vha
->hw
;
399 int rval
= (DRIVER_ERROR
<< 16);
400 int req_sg_cnt
, rsp_sg_cnt
;
402 struct fc_port
*fcport
;
403 char *type
= "FC_BSG_HST_CT";
407 dma_map_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
408 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
410 ql_log(ql_log_warn
, vha
, 0x700f,
411 "dma_map_sg return %d for request\n", req_sg_cnt
);
416 rsp_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
417 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
419 ql_log(ql_log_warn
, vha
, 0x7010,
420 "dma_map_sg return %d for reply\n", rsp_sg_cnt
);
425 if ((req_sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) ||
426 (rsp_sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
)) {
427 ql_log(ql_log_warn
, vha
, 0x7011,
428 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
429 "dma_reply_sg_cnt: %x\n", bsg_job
->request_payload
.sg_cnt
,
430 req_sg_cnt
, bsg_job
->reply_payload
.sg_cnt
, rsp_sg_cnt
);
435 if (!vha
->flags
.online
) {
436 ql_log(ql_log_warn
, vha
, 0x7012,
437 "Host is not online.\n");
443 (bsg_job
->request
->rqst_data
.h_ct
.preamble_word1
& 0xFF000000)
447 loop_id
= cpu_to_le16(NPH_SNS
);
450 loop_id
= vha
->mgmt_svr_loop_id
;
453 ql_dbg(ql_dbg_user
, vha
, 0x7013,
454 "Unknown loop id: %x.\n", loop_id
);
459 /* Allocate a dummy fcport structure, since functions preparing the
460 * IOCB and mailbox command retrieves port specific information
461 * from fcport structure. For Host based ELS commands there will be
462 * no fcport structure allocated
464 fcport
= qla2x00_alloc_fcport(vha
, GFP_KERNEL
);
466 ql_log(ql_log_warn
, vha
, 0x7014,
467 "Failed to allocate fcport.\n");
472 /* Initialize all required fields of fcport */
474 fcport
->vp_idx
= vha
->vp_idx
;
475 fcport
->d_id
.b
.al_pa
= bsg_job
->request
->rqst_data
.h_ct
.port_id
[0];
476 fcport
->d_id
.b
.area
= bsg_job
->request
->rqst_data
.h_ct
.port_id
[1];
477 fcport
->d_id
.b
.domain
= bsg_job
->request
->rqst_data
.h_ct
.port_id
[2];
478 fcport
->loop_id
= loop_id
;
480 /* Alloc SRB structure */
481 sp
= qla2x00_get_ctx_bsg_sp(vha
, fcport
, sizeof(struct srb_ctx
));
483 ql_log(ql_log_warn
, vha
, 0x7015,
484 "qla2x00_get_ctx_bsg_sp failed.\n");
486 goto done_free_fcport
;
490 ct
->type
= SRB_CT_CMD
;
492 ct
->u
.bsg_job
= bsg_job
;
494 ql_dbg(ql_dbg_user
, vha
, 0x7016,
495 "bsg rqst type: %s else type: %x - "
496 "loop-id=%x portid=%02x%02x%02x.\n", type
,
497 (bsg_job
->request
->rqst_data
.h_ct
.preamble_word2
>> 16),
498 fcport
->loop_id
, fcport
->d_id
.b
.domain
, fcport
->d_id
.b
.area
,
499 fcport
->d_id
.b
.al_pa
);
501 rval
= qla2x00_start_sp(sp
);
502 if (rval
!= QLA_SUCCESS
) {
503 ql_log(ql_log_warn
, vha
, 0x7017,
504 "qla2x00_start_sp failed=%d.\n", rval
);
506 mempool_free(sp
, ha
->srb_mempool
);
508 goto done_free_fcport
;
515 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
516 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
517 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
518 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
523 /* Set the port configuration to enable the
524 * internal loopback on ISP81XX
527 qla81xx_set_internal_loopback(scsi_qla_host_t
*vha
, uint16_t *config
,
528 uint16_t *new_config
)
532 struct qla_hw_data
*ha
= vha
->hw
;
535 goto done_set_internal
;
537 new_config
[0] = config
[0] | (ENABLE_INTERNAL_LOOPBACK
<< 1);
538 memcpy(&new_config
[1], &config
[1], sizeof(uint16_t) * 3) ;
540 ha
->notify_dcbx_comp
= 1;
541 ret
= qla81xx_set_port_config(vha
, new_config
);
542 if (ret
!= QLA_SUCCESS
) {
543 ql_log(ql_log_warn
, vha
, 0x7021,
544 "set port config failed.\n");
545 ha
->notify_dcbx_comp
= 0;
547 goto done_set_internal
;
550 /* Wait for DCBX complete event */
551 if (!wait_for_completion_timeout(&ha
->dcbx_comp
, (20 * HZ
))) {
552 ql_dbg(ql_dbg_user
, vha
, 0x7022,
553 "State change notification not received.\n");
555 ql_dbg(ql_dbg_user
, vha
, 0x7023,
556 "State change received.\n");
558 ha
->notify_dcbx_comp
= 0;
564 /* Set the port configuration to disable the
565 * internal loopback on ISP81XX
568 qla81xx_reset_internal_loopback(scsi_qla_host_t
*vha
, uint16_t *config
,
573 uint16_t new_config
[4];
574 struct qla_hw_data
*ha
= vha
->hw
;
577 goto done_reset_internal
;
579 memset(new_config
, 0 , sizeof(new_config
));
580 if ((config
[0] & INTERNAL_LOOPBACK_MASK
) >> 1 ==
581 ENABLE_INTERNAL_LOOPBACK
) {
582 new_config
[0] = config
[0] & ~INTERNAL_LOOPBACK_MASK
;
583 memcpy(&new_config
[1], &config
[1], sizeof(uint16_t) * 3) ;
585 ha
->notify_dcbx_comp
= wait
;
586 ret
= qla81xx_set_port_config(vha
, new_config
);
587 if (ret
!= QLA_SUCCESS
) {
588 ql_log(ql_log_warn
, vha
, 0x7025,
589 "Set port config failed.\n");
590 ha
->notify_dcbx_comp
= 0;
592 goto done_reset_internal
;
595 /* Wait for DCBX complete event */
596 if (wait
&& !wait_for_completion_timeout(&ha
->dcbx_comp
,
598 ql_dbg(ql_dbg_user
, vha
, 0x7026,
599 "State change notification not received.\n");
600 ha
->notify_dcbx_comp
= 0;
602 goto done_reset_internal
;
604 ql_dbg(ql_dbg_user
, vha
, 0x7027,
605 "State change received.\n");
607 ha
->notify_dcbx_comp
= 0;
614 qla2x00_process_loopback(struct fc_bsg_job
*bsg_job
)
616 struct Scsi_Host
*host
= bsg_job
->shost
;
617 scsi_qla_host_t
*vha
= shost_priv(host
);
618 struct qla_hw_data
*ha
= vha
->hw
;
620 uint8_t command_sent
;
622 struct msg_echo_lb elreq
;
623 uint16_t response
[MAILBOX_REGISTER_COUNT
];
624 uint16_t config
[4], new_config
[4];
626 uint8_t *req_data
= NULL
;
627 dma_addr_t req_data_dma
;
628 uint32_t req_data_len
;
629 uint8_t *rsp_data
= NULL
;
630 dma_addr_t rsp_data_dma
;
631 uint32_t rsp_data_len
;
633 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
634 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
635 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
636 ql_log(ql_log_warn
, vha
, 0x7018, "Abort active or needed.\n");
640 if (!vha
->flags
.online
) {
641 ql_log(ql_log_warn
, vha
, 0x7019, "Host is not online.\n");
645 elreq
.req_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
646 bsg_job
->request_payload
.sg_list
, bsg_job
->request_payload
.sg_cnt
,
649 if (!elreq
.req_sg_cnt
) {
650 ql_log(ql_log_warn
, vha
, 0x701a,
651 "dma_map_sg returned %d for request.\n", elreq
.req_sg_cnt
);
655 elreq
.rsp_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
656 bsg_job
->reply_payload
.sg_list
, bsg_job
->reply_payload
.sg_cnt
,
659 if (!elreq
.rsp_sg_cnt
) {
660 ql_log(ql_log_warn
, vha
, 0x701b,
661 "dma_map_sg returned %d for reply.\n", elreq
.rsp_sg_cnt
);
663 goto done_unmap_req_sg
;
666 if ((elreq
.req_sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) ||
667 (elreq
.rsp_sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
)) {
668 ql_log(ql_log_warn
, vha
, 0x701c,
669 "dma mapping resulted in different sg counts, "
670 "request_sg_cnt: %x dma_request_sg_cnt: %x "
671 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
672 bsg_job
->request_payload
.sg_cnt
, elreq
.req_sg_cnt
,
673 bsg_job
->reply_payload
.sg_cnt
, elreq
.rsp_sg_cnt
);
677 req_data_len
= rsp_data_len
= bsg_job
->request_payload
.payload_len
;
678 req_data
= dma_alloc_coherent(&ha
->pdev
->dev
, req_data_len
,
679 &req_data_dma
, GFP_KERNEL
);
681 ql_log(ql_log_warn
, vha
, 0x701d,
682 "dma alloc failed for req_data.\n");
687 rsp_data
= dma_alloc_coherent(&ha
->pdev
->dev
, rsp_data_len
,
688 &rsp_data_dma
, GFP_KERNEL
);
690 ql_log(ql_log_warn
, vha
, 0x7004,
691 "dma alloc failed for rsp_data.\n");
693 goto done_free_dma_req
;
696 /* Copy the request buffer in req_data now */
697 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
698 bsg_job
->request_payload
.sg_cnt
, req_data
, req_data_len
);
700 elreq
.send_dma
= req_data_dma
;
701 elreq
.rcv_dma
= rsp_data_dma
;
702 elreq
.transfer_size
= req_data_len
;
704 elreq
.options
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
706 if ((ha
->current_topology
== ISP_CFG_F
||
707 (atomic_read(&vha
->loop_state
) == LOOP_DOWN
) ||
709 le32_to_cpu(*(uint32_t *)req_data
) == ELS_OPCODE_BYTE
710 && req_data_len
== MAX_ELS_FRAME_PAYLOAD
)) &&
711 elreq
.options
== EXTERNAL_LOOPBACK
) {
712 type
= "FC_BSG_HST_VENDOR_ECHO_DIAG";
713 ql_dbg(ql_dbg_user
, vha
, 0x701e,
714 "BSG request type: %s.\n", type
);
715 command_sent
= INT_DEF_LB_ECHO_CMD
;
716 rval
= qla2x00_echo_test(vha
, &elreq
, response
);
718 if (IS_QLA81XX(ha
)) {
719 memset(config
, 0, sizeof(config
));
720 memset(new_config
, 0, sizeof(new_config
));
721 if (qla81xx_get_port_config(vha
, config
)) {
722 ql_log(ql_log_warn
, vha
, 0x701f,
723 "Get port config failed.\n");
724 bsg_job
->reply
->reply_payload_rcv_len
= 0;
725 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
727 goto done_free_dma_req
;
730 if (elreq
.options
!= EXTERNAL_LOOPBACK
) {
731 ql_dbg(ql_dbg_user
, vha
, 0x7020,
732 "Internal: curent port config = %x\n",
734 if (qla81xx_set_internal_loopback(vha
, config
,
736 ql_log(ql_log_warn
, vha
, 0x7024,
737 "Internal loopback failed.\n");
738 bsg_job
->reply
->reply_payload_rcv_len
=
740 bsg_job
->reply
->result
=
743 goto done_free_dma_req
;
746 /* For external loopback to work
747 * ensure internal loopback is disabled
749 if (qla81xx_reset_internal_loopback(vha
,
751 bsg_job
->reply
->reply_payload_rcv_len
=
753 bsg_job
->reply
->result
=
756 goto done_free_dma_req
;
760 type
= "FC_BSG_HST_VENDOR_LOOPBACK";
761 ql_dbg(ql_dbg_user
, vha
, 0x7028,
762 "BSG request type: %s.\n", type
);
764 command_sent
= INT_DEF_LB_LOOPBACK_CMD
;
765 rval
= qla2x00_loopback_test(vha
, &elreq
, response
);
768 /* Revert back to original port config
769 * Also clear internal loopback
771 qla81xx_reset_internal_loopback(vha
,
775 if (response
[0] == MBS_COMMAND_ERROR
&&
776 response
[1] == MBS_LB_RESET
) {
777 ql_log(ql_log_warn
, vha
, 0x7029,
778 "MBX command error, Aborting ISP.\n");
779 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
780 qla2xxx_wake_dpc(vha
);
781 qla2x00_wait_for_chip_reset(vha
);
782 /* Also reset the MPI */
783 if (qla81xx_restart_mpi_firmware(vha
) !=
785 ql_log(ql_log_warn
, vha
, 0x702a,
786 "MPI reset failed.\n");
789 bsg_job
->reply
->reply_payload_rcv_len
= 0;
790 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
792 goto done_free_dma_req
;
795 type
= "FC_BSG_HST_VENDOR_LOOPBACK";
796 ql_dbg(ql_dbg_user
, vha
, 0x702b,
797 "BSG request type: %s.\n", type
);
798 command_sent
= INT_DEF_LB_LOOPBACK_CMD
;
799 rval
= qla2x00_loopback_test(vha
, &elreq
, response
);
804 ql_log(ql_log_warn
, vha
, 0x702c,
805 "Vendor request %s failed.\n", type
);
807 fw_sts_ptr
= ((uint8_t *)bsg_job
->req
->sense
) +
808 sizeof(struct fc_bsg_reply
);
810 memcpy(fw_sts_ptr
, response
, sizeof(response
));
811 fw_sts_ptr
+= sizeof(response
);
812 *fw_sts_ptr
= command_sent
;
814 bsg_job
->reply
->reply_payload_rcv_len
= 0;
815 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
817 ql_dbg(ql_dbg_user
, vha
, 0x702d,
818 "Vendor request %s completed.\n", type
);
820 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
) +
821 sizeof(response
) + sizeof(uint8_t);
822 bsg_job
->reply
->reply_payload_rcv_len
=
823 bsg_job
->reply_payload
.payload_len
;
824 fw_sts_ptr
= ((uint8_t *)bsg_job
->req
->sense
) +
825 sizeof(struct fc_bsg_reply
);
826 memcpy(fw_sts_ptr
, response
, sizeof(response
));
827 fw_sts_ptr
+= sizeof(response
);
828 *fw_sts_ptr
= command_sent
;
829 bsg_job
->reply
->result
= DID_OK
;
830 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
831 bsg_job
->reply_payload
.sg_cnt
, rsp_data
,
834 bsg_job
->job_done(bsg_job
);
836 dma_free_coherent(&ha
->pdev
->dev
, rsp_data_len
,
837 rsp_data
, rsp_data_dma
);
839 dma_free_coherent(&ha
->pdev
->dev
, req_data_len
,
840 req_data
, req_data_dma
);
842 dma_unmap_sg(&ha
->pdev
->dev
,
843 bsg_job
->reply_payload
.sg_list
,
844 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
846 dma_unmap_sg(&ha
->pdev
->dev
,
847 bsg_job
->request_payload
.sg_list
,
848 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
853 qla84xx_reset(struct fc_bsg_job
*bsg_job
)
855 struct Scsi_Host
*host
= bsg_job
->shost
;
856 scsi_qla_host_t
*vha
= shost_priv(host
);
857 struct qla_hw_data
*ha
= vha
->hw
;
861 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
862 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
863 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
864 ql_log(ql_log_warn
, vha
, 0x702e, "Abort active or needed.\n");
868 if (!IS_QLA84XX(ha
)) {
869 ql_dbg(ql_dbg_user
, vha
, 0x702f, "Not 84xx, exiting.\n");
873 flag
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
875 rval
= qla84xx_reset_chip(vha
, flag
== A84_ISSUE_RESET_DIAG_FW
);
878 ql_log(ql_log_warn
, vha
, 0x7030,
879 "Vendor request 84xx reset failed.\n");
880 rval
= bsg_job
->reply
->reply_payload_rcv_len
= 0;
881 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
884 ql_dbg(ql_dbg_user
, vha
, 0x7031,
885 "Vendor request 84xx reset completed.\n");
886 bsg_job
->reply
->result
= DID_OK
;
889 bsg_job
->job_done(bsg_job
);
894 qla84xx_updatefw(struct fc_bsg_job
*bsg_job
)
896 struct Scsi_Host
*host
= bsg_job
->shost
;
897 scsi_qla_host_t
*vha
= shost_priv(host
);
898 struct qla_hw_data
*ha
= vha
->hw
;
899 struct verify_chip_entry_84xx
*mn
= NULL
;
900 dma_addr_t mn_dma
, fw_dma
;
909 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
910 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
911 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
))
914 if (!IS_QLA84XX(ha
)) {
915 ql_dbg(ql_dbg_user
, vha
, 0x7032,
916 "Not 84xx, exiting.\n");
920 sg_cnt
= dma_map_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
921 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
923 ql_log(ql_log_warn
, vha
, 0x7033,
924 "dma_map_sg returned %d for request.\n", sg_cnt
);
928 if (sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) {
929 ql_log(ql_log_warn
, vha
, 0x7034,
930 "DMA mapping resulted in different sg counts, "
931 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
932 bsg_job
->request_payload
.sg_cnt
, sg_cnt
);
937 data_len
= bsg_job
->request_payload
.payload_len
;
938 fw_buf
= dma_alloc_coherent(&ha
->pdev
->dev
, data_len
,
939 &fw_dma
, GFP_KERNEL
);
941 ql_log(ql_log_warn
, vha
, 0x7035,
942 "DMA alloc failed for fw_buf.\n");
947 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
948 bsg_job
->request_payload
.sg_cnt
, fw_buf
, data_len
);
950 mn
= dma_pool_alloc(ha
->s_dma_pool
, GFP_KERNEL
, &mn_dma
);
952 ql_log(ql_log_warn
, vha
, 0x7036,
953 "DMA alloc failed for fw buffer.\n");
955 goto done_free_fw_buf
;
958 flag
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
959 fw_ver
= le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf
+ 2)));
961 memset(mn
, 0, sizeof(struct access_chip_84xx
));
962 mn
->entry_type
= VERIFY_CHIP_IOCB_TYPE
;
965 options
= VCO_FORCE_UPDATE
| VCO_END_OF_DATA
;
966 if (flag
== A84_ISSUE_UPDATE_DIAGFW_CMD
)
967 options
|= VCO_DIAG_FW
;
969 mn
->options
= cpu_to_le16(options
);
970 mn
->fw_ver
= cpu_to_le32(fw_ver
);
971 mn
->fw_size
= cpu_to_le32(data_len
);
972 mn
->fw_seq_size
= cpu_to_le32(data_len
);
973 mn
->dseg_address
[0] = cpu_to_le32(LSD(fw_dma
));
974 mn
->dseg_address
[1] = cpu_to_le32(MSD(fw_dma
));
975 mn
->dseg_length
= cpu_to_le32(data_len
);
976 mn
->data_seg_cnt
= cpu_to_le16(1);
978 rval
= qla2x00_issue_iocb_timeout(vha
, mn
, mn_dma
, 0, 120);
981 ql_log(ql_log_warn
, vha
, 0x7037,
982 "Vendor request 84xx updatefw failed.\n");
984 rval
= bsg_job
->reply
->reply_payload_rcv_len
= 0;
985 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
988 ql_dbg(ql_dbg_user
, vha
, 0x7038,
989 "Vendor request 84xx updatefw completed.\n");
991 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
992 bsg_job
->reply
->result
= DID_OK
;
995 bsg_job
->job_done(bsg_job
);
996 dma_pool_free(ha
->s_dma_pool
, mn
, mn_dma
);
999 dma_free_coherent(&ha
->pdev
->dev
, data_len
, fw_buf
, fw_dma
);
1002 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
1003 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1009 qla84xx_mgmt_cmd(struct fc_bsg_job
*bsg_job
)
1011 struct Scsi_Host
*host
= bsg_job
->shost
;
1012 scsi_qla_host_t
*vha
= shost_priv(host
);
1013 struct qla_hw_data
*ha
= vha
->hw
;
1014 struct access_chip_84xx
*mn
= NULL
;
1015 dma_addr_t mn_dma
, mgmt_dma
;
1016 void *mgmt_b
= NULL
;
1018 struct qla_bsg_a84_mgmt
*ql84_mgmt
;
1020 uint32_t data_len
= 0;
1021 uint32_t dma_direction
= DMA_NONE
;
1023 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
1024 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
1025 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
1026 ql_log(ql_log_warn
, vha
, 0x7039,
1027 "Abort active or needed.\n");
1031 if (!IS_QLA84XX(ha
)) {
1032 ql_log(ql_log_warn
, vha
, 0x703a,
1033 "Not 84xx, exiting.\n");
1037 ql84_mgmt
= (struct qla_bsg_a84_mgmt
*)((char *)bsg_job
->request
+
1038 sizeof(struct fc_bsg_request
));
1040 ql_log(ql_log_warn
, vha
, 0x703b,
1041 "MGMT header not provided, exiting.\n");
1045 mn
= dma_pool_alloc(ha
->s_dma_pool
, GFP_KERNEL
, &mn_dma
);
1047 ql_log(ql_log_warn
, vha
, 0x703c,
1048 "DMA alloc failed for fw buffer.\n");
1052 memset(mn
, 0, sizeof(struct access_chip_84xx
));
1053 mn
->entry_type
= ACCESS_CHIP_IOCB_TYPE
;
1054 mn
->entry_count
= 1;
1056 switch (ql84_mgmt
->mgmt
.cmd
) {
1057 case QLA84_MGMT_READ_MEM
:
1058 case QLA84_MGMT_GET_INFO
:
1059 sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
1060 bsg_job
->reply_payload
.sg_list
,
1061 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
1063 ql_log(ql_log_warn
, vha
, 0x703d,
1064 "dma_map_sg returned %d for reply.\n", sg_cnt
);
1069 dma_direction
= DMA_FROM_DEVICE
;
1071 if (sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
) {
1072 ql_log(ql_log_warn
, vha
, 0x703e,
1073 "DMA mapping resulted in different sg counts, "
1074 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1075 bsg_job
->reply_payload
.sg_cnt
, sg_cnt
);
1080 data_len
= bsg_job
->reply_payload
.payload_len
;
1082 mgmt_b
= dma_alloc_coherent(&ha
->pdev
->dev
, data_len
,
1083 &mgmt_dma
, GFP_KERNEL
);
1085 ql_log(ql_log_warn
, vha
, 0x703f,
1086 "DMA alloc failed for mgmt_b.\n");
1091 if (ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_READ_MEM
) {
1092 mn
->options
= cpu_to_le16(ACO_DUMP_MEMORY
);
1095 ql84_mgmt
->mgmt
.mgmtp
.u
.mem
.start_addr
);
1097 } else if (ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_GET_INFO
) {
1098 mn
->options
= cpu_to_le16(ACO_REQUEST_INFO
);
1100 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.info
.type
);
1104 ql84_mgmt
->mgmt
.mgmtp
.u
.info
.context
);
1108 case QLA84_MGMT_WRITE_MEM
:
1109 sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
1110 bsg_job
->request_payload
.sg_list
,
1111 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1114 ql_log(ql_log_warn
, vha
, 0x7040,
1115 "dma_map_sg returned %d.\n", sg_cnt
);
1120 dma_direction
= DMA_TO_DEVICE
;
1122 if (sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) {
1123 ql_log(ql_log_warn
, vha
, 0x7041,
1124 "DMA mapping resulted in different sg counts, "
1125 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1126 bsg_job
->request_payload
.sg_cnt
, sg_cnt
);
1131 data_len
= bsg_job
->request_payload
.payload_len
;
1132 mgmt_b
= dma_alloc_coherent(&ha
->pdev
->dev
, data_len
,
1133 &mgmt_dma
, GFP_KERNEL
);
1135 ql_log(ql_log_warn
, vha
, 0x7042,
1136 "DMA alloc failed for mgmt_b.\n");
1141 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1142 bsg_job
->request_payload
.sg_cnt
, mgmt_b
, data_len
);
1144 mn
->options
= cpu_to_le16(ACO_LOAD_MEMORY
);
1146 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.mem
.start_addr
);
1149 case QLA84_MGMT_CHNG_CONFIG
:
1150 mn
->options
= cpu_to_le16(ACO_CHANGE_CONFIG_PARAM
);
1152 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.config
.id
);
1155 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.config
.param0
);
1158 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.config
.param1
);
1166 if (ql84_mgmt
->mgmt
.cmd
!= QLA84_MGMT_CHNG_CONFIG
) {
1167 mn
->total_byte_cnt
= cpu_to_le32(ql84_mgmt
->mgmt
.len
);
1168 mn
->dseg_count
= cpu_to_le16(1);
1169 mn
->dseg_address
[0] = cpu_to_le32(LSD(mgmt_dma
));
1170 mn
->dseg_address
[1] = cpu_to_le32(MSD(mgmt_dma
));
1171 mn
->dseg_length
= cpu_to_le32(ql84_mgmt
->mgmt
.len
);
1174 rval
= qla2x00_issue_iocb(vha
, mn
, mn_dma
, 0);
1177 ql_log(ql_log_warn
, vha
, 0x7043,
1178 "Vendor request 84xx mgmt failed.\n");
1180 rval
= bsg_job
->reply
->reply_payload_rcv_len
= 0;
1181 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
1184 ql_dbg(ql_dbg_user
, vha
, 0x7044,
1185 "Vendor request 84xx mgmt completed.\n");
1187 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1188 bsg_job
->reply
->result
= DID_OK
;
1190 if ((ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_READ_MEM
) ||
1191 (ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_GET_INFO
)) {
1192 bsg_job
->reply
->reply_payload_rcv_len
=
1193 bsg_job
->reply_payload
.payload_len
;
1195 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
1196 bsg_job
->reply_payload
.sg_cnt
, mgmt_b
,
1201 bsg_job
->job_done(bsg_job
);
1205 dma_free_coherent(&ha
->pdev
->dev
, data_len
, mgmt_b
, mgmt_dma
);
1207 if (dma_direction
== DMA_TO_DEVICE
)
1208 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
1209 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1210 else if (dma_direction
== DMA_FROM_DEVICE
)
1211 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
1212 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
1215 dma_pool_free(ha
->s_dma_pool
, mn
, mn_dma
);
1221 qla24xx_iidma(struct fc_bsg_job
*bsg_job
)
1223 struct Scsi_Host
*host
= bsg_job
->shost
;
1224 scsi_qla_host_t
*vha
= shost_priv(host
);
1226 struct qla_port_param
*port_param
= NULL
;
1227 fc_port_t
*fcport
= NULL
;
1228 uint16_t mb
[MAILBOX_REGISTER_COUNT
];
1229 uint8_t *rsp_ptr
= NULL
;
1231 bsg_job
->reply
->reply_payload_rcv_len
= 0;
1233 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
1234 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
1235 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
1236 ql_log(ql_log_warn
, vha
, 0x7045, "abort active or needed.\n");
1240 if (!IS_IIDMA_CAPABLE(vha
->hw
)) {
1241 ql_log(ql_log_info
, vha
, 0x7046, "iiDMA not supported.\n");
1245 port_param
= (struct qla_port_param
*)((char *)bsg_job
->request
+
1246 sizeof(struct fc_bsg_request
));
1248 ql_log(ql_log_warn
, vha
, 0x7047,
1249 "port_param header not provided.\n");
1253 if (port_param
->fc_scsi_addr
.dest_type
!= EXT_DEF_TYPE_WWPN
) {
1254 ql_log(ql_log_warn
, vha
, 0x7048,
1255 "Invalid destination type.\n");
1259 list_for_each_entry(fcport
, &vha
->vp_fcports
, list
) {
1260 if (fcport
->port_type
!= FCT_TARGET
)
1263 if (memcmp(port_param
->fc_scsi_addr
.dest_addr
.wwpn
,
1264 fcport
->port_name
, sizeof(fcport
->port_name
)))
1270 ql_log(ql_log_warn
, vha
, 0x7049,
1271 "Failed to find port.\n");
1275 if (atomic_read(&fcport
->state
) != FCS_ONLINE
) {
1276 ql_log(ql_log_warn
, vha
, 0x704a,
1277 "Port is not online.\n");
1281 if (fcport
->flags
& FCF_LOGIN_NEEDED
) {
1282 ql_log(ql_log_warn
, vha
, 0x704b,
1283 "Remote port not logged in flags = 0x%x.\n", fcport
->flags
);
1287 if (port_param
->mode
)
1288 rval
= qla2x00_set_idma_speed(vha
, fcport
->loop_id
,
1289 port_param
->speed
, mb
);
1291 rval
= qla2x00_get_idma_speed(vha
, fcport
->loop_id
,
1292 &port_param
->speed
, mb
);
1295 ql_log(ql_log_warn
, vha
, 0x704c,
1296 "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1297 "%04x %x %04x %04x.\n", fcport
->port_name
[0],
1298 fcport
->port_name
[1], fcport
->port_name
[2],
1299 fcport
->port_name
[3], fcport
->port_name
[4],
1300 fcport
->port_name
[5], fcport
->port_name
[6],
1301 fcport
->port_name
[7], rval
, fcport
->fp_speed
, mb
[0], mb
[1]);
1303 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
1306 if (!port_param
->mode
) {
1307 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
) +
1308 sizeof(struct qla_port_param
);
1310 rsp_ptr
= ((uint8_t *)bsg_job
->reply
) +
1311 sizeof(struct fc_bsg_reply
);
1313 memcpy(rsp_ptr
, port_param
,
1314 sizeof(struct qla_port_param
));
1317 bsg_job
->reply
->result
= DID_OK
;
1320 bsg_job
->job_done(bsg_job
);
1325 qla2x00_optrom_setup(struct fc_bsg_job
*bsg_job
, scsi_qla_host_t
*vha
,
1330 struct qla_hw_data
*ha
= vha
->hw
;
1332 bsg_job
->reply
->reply_payload_rcv_len
= 0;
1334 if (unlikely(pci_channel_offline(ha
->pdev
)))
1337 start
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
1338 if (start
> ha
->optrom_size
) {
1339 ql_log(ql_log_warn
, vha
, 0x7055,
1340 "start %d > optrom_size %d.\n", start
, ha
->optrom_size
);
1344 if (ha
->optrom_state
!= QLA_SWAITING
) {
1345 ql_log(ql_log_info
, vha
, 0x7056,
1346 "optrom_state %d.\n", ha
->optrom_state
);
1350 ha
->optrom_region_start
= start
;
1351 ql_dbg(ql_dbg_user
, vha
, 0x7057, "is_update=%d.\n", is_update
);
1353 if (ha
->optrom_size
== OPTROM_SIZE_2300
&& start
== 0)
1355 else if (start
== (ha
->flt_region_boot
* 4) ||
1356 start
== (ha
->flt_region_fw
* 4))
1358 else if (IS_QLA24XX_TYPE(ha
) || IS_QLA25XX(ha
) ||
1359 IS_QLA8XXX_TYPE(ha
))
1362 ql_log(ql_log_warn
, vha
, 0x7058,
1363 "Invalid start region 0x%x/0x%x.\n", start
,
1364 bsg_job
->request_payload
.payload_len
);
1368 ha
->optrom_region_size
= start
+
1369 bsg_job
->request_payload
.payload_len
> ha
->optrom_size
?
1370 ha
->optrom_size
- start
:
1371 bsg_job
->request_payload
.payload_len
;
1372 ha
->optrom_state
= QLA_SWRITING
;
1374 ha
->optrom_region_size
= start
+
1375 bsg_job
->reply_payload
.payload_len
> ha
->optrom_size
?
1376 ha
->optrom_size
- start
:
1377 bsg_job
->reply_payload
.payload_len
;
1378 ha
->optrom_state
= QLA_SREADING
;
1381 ha
->optrom_buffer
= vmalloc(ha
->optrom_region_size
);
1382 if (!ha
->optrom_buffer
) {
1383 ql_log(ql_log_warn
, vha
, 0x7059,
1384 "Read: Unable to allocate memory for optrom retrieval "
1385 "(%x)\n", ha
->optrom_region_size
);
1387 ha
->optrom_state
= QLA_SWAITING
;
1391 memset(ha
->optrom_buffer
, 0, ha
->optrom_region_size
);
1396 qla2x00_read_optrom(struct fc_bsg_job
*bsg_job
)
1398 struct Scsi_Host
*host
= bsg_job
->shost
;
1399 scsi_qla_host_t
*vha
= shost_priv(host
);
1400 struct qla_hw_data
*ha
= vha
->hw
;
1403 rval
= qla2x00_optrom_setup(bsg_job
, vha
, 0);
1407 ha
->isp_ops
->read_optrom(vha
, ha
->optrom_buffer
,
1408 ha
->optrom_region_start
, ha
->optrom_region_size
);
1410 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
1411 bsg_job
->reply_payload
.sg_cnt
, ha
->optrom_buffer
,
1412 ha
->optrom_region_size
);
1414 bsg_job
->reply
->reply_payload_rcv_len
= ha
->optrom_region_size
;
1415 bsg_job
->reply
->result
= DID_OK
;
1416 vfree(ha
->optrom_buffer
);
1417 ha
->optrom_buffer
= NULL
;
1418 ha
->optrom_state
= QLA_SWAITING
;
1419 bsg_job
->job_done(bsg_job
);
1424 qla2x00_update_optrom(struct fc_bsg_job
*bsg_job
)
1426 struct Scsi_Host
*host
= bsg_job
->shost
;
1427 scsi_qla_host_t
*vha
= shost_priv(host
);
1428 struct qla_hw_data
*ha
= vha
->hw
;
1431 rval
= qla2x00_optrom_setup(bsg_job
, vha
, 1);
1435 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1436 bsg_job
->request_payload
.sg_cnt
, ha
->optrom_buffer
,
1437 ha
->optrom_region_size
);
1439 ha
->isp_ops
->write_optrom(vha
, ha
->optrom_buffer
,
1440 ha
->optrom_region_start
, ha
->optrom_region_size
);
1442 bsg_job
->reply
->result
= DID_OK
;
1443 vfree(ha
->optrom_buffer
);
1444 ha
->optrom_buffer
= NULL
;
1445 ha
->optrom_state
= QLA_SWAITING
;
1446 bsg_job
->job_done(bsg_job
);
1451 qla2x00_update_fru_versions(struct fc_bsg_job
*bsg_job
)
1453 struct Scsi_Host
*host
= bsg_job
->shost
;
1454 scsi_qla_host_t
*vha
= shost_priv(host
);
1455 struct qla_hw_data
*ha
= vha
->hw
;
1457 uint8_t bsg
[DMA_POOL_SIZE
];
1458 struct qla_image_version_list
*list
= (void *)bsg
;
1459 struct qla_image_version
*image
;
1462 void *sfp
= dma_pool_alloc(ha
->s_dma_pool
, GFP_KERNEL
, &sfp_dma
);
1464 bsg_job
->reply
->reply_data
.vendor_reply
.vendor_rsp
[0] =
1465 EXT_STATUS_NO_MEMORY
;
1469 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1470 bsg_job
->request_payload
.sg_cnt
, list
, sizeof(bsg
));
1472 image
= list
->version
;
1473 count
= list
->count
;
1475 memcpy(sfp
, &image
->field_info
, sizeof(image
->field_info
));
1476 rval
= qla2x00_write_sfp(vha
, sfp_dma
, sfp
,
1477 image
->field_address
.device
, image
->field_address
.offset
,
1478 sizeof(image
->field_info
), image
->field_address
.option
);
1480 bsg_job
->reply
->reply_data
.vendor_reply
.vendor_rsp
[0] =
1487 bsg_job
->reply
->reply_data
.vendor_reply
.vendor_rsp
[0] = 0;
1490 dma_pool_free(ha
->s_dma_pool
, sfp
, sfp_dma
);
1493 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1494 bsg_job
->reply
->result
= DID_OK
<< 16;
1495 bsg_job
->job_done(bsg_job
);
1501 qla2x00_read_fru_status(struct fc_bsg_job
*bsg_job
)
1503 struct Scsi_Host
*host
= bsg_job
->shost
;
1504 scsi_qla_host_t
*vha
= shost_priv(host
);
1505 struct qla_hw_data
*ha
= vha
->hw
;
1507 uint8_t bsg
[DMA_POOL_SIZE
];
1508 struct qla_status_reg
*sr
= (void *)bsg
;
1510 uint8_t *sfp
= dma_pool_alloc(ha
->s_dma_pool
, GFP_KERNEL
, &sfp_dma
);
1512 bsg_job
->reply
->reply_data
.vendor_reply
.vendor_rsp
[0] =
1513 EXT_STATUS_NO_MEMORY
;
1517 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1518 bsg_job
->request_payload
.sg_cnt
, sr
, sizeof(*sr
));
1520 rval
= qla2x00_read_sfp(vha
, sfp_dma
, sfp
,
1521 sr
->field_address
.device
, sr
->field_address
.offset
,
1522 sizeof(sr
->status_reg
), sr
->field_address
.option
);
1523 sr
->status_reg
= *sfp
;
1526 bsg_job
->reply
->reply_data
.vendor_reply
.vendor_rsp
[0] =
1531 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
1532 bsg_job
->reply_payload
.sg_cnt
, sr
, sizeof(*sr
));
1534 bsg_job
->reply
->reply_data
.vendor_reply
.vendor_rsp
[0] = 0;
1537 dma_pool_free(ha
->s_dma_pool
, sfp
, sfp_dma
);
1540 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1541 bsg_job
->reply
->reply_payload_rcv_len
= sizeof(*sr
);
1542 bsg_job
->reply
->result
= DID_OK
<< 16;
1543 bsg_job
->job_done(bsg_job
);
1549 qla2x00_write_fru_status(struct fc_bsg_job
*bsg_job
)
1551 struct Scsi_Host
*host
= bsg_job
->shost
;
1552 scsi_qla_host_t
*vha
= shost_priv(host
);
1553 struct qla_hw_data
*ha
= vha
->hw
;
1555 uint8_t bsg
[DMA_POOL_SIZE
];
1556 struct qla_status_reg
*sr
= (void *)bsg
;
1558 uint8_t *sfp
= dma_pool_alloc(ha
->s_dma_pool
, GFP_KERNEL
, &sfp_dma
);
1560 bsg_job
->reply
->reply_data
.vendor_reply
.vendor_rsp
[0] =
1561 EXT_STATUS_NO_MEMORY
;
1565 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1566 bsg_job
->request_payload
.sg_cnt
, sr
, sizeof(*sr
));
1568 *sfp
= sr
->status_reg
;
1569 rval
= qla2x00_write_sfp(vha
, sfp_dma
, sfp
,
1570 sr
->field_address
.device
, sr
->field_address
.offset
,
1571 sizeof(sr
->status_reg
), sr
->field_address
.option
);
1574 bsg_job
->reply
->reply_data
.vendor_reply
.vendor_rsp
[0] =
1579 bsg_job
->reply
->reply_data
.vendor_reply
.vendor_rsp
[0] = 0;
1582 dma_pool_free(ha
->s_dma_pool
, sfp
, sfp_dma
);
1585 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1586 bsg_job
->reply
->result
= DID_OK
<< 16;
1587 bsg_job
->job_done(bsg_job
);
1593 qla2x00_process_vendor_specific(struct fc_bsg_job
*bsg_job
)
1595 switch (bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[0]) {
1596 case QL_VND_LOOPBACK
:
1597 return qla2x00_process_loopback(bsg_job
);
1599 case QL_VND_A84_RESET
:
1600 return qla84xx_reset(bsg_job
);
1602 case QL_VND_A84_UPDATE_FW
:
1603 return qla84xx_updatefw(bsg_job
);
1605 case QL_VND_A84_MGMT_CMD
:
1606 return qla84xx_mgmt_cmd(bsg_job
);
1609 return qla24xx_iidma(bsg_job
);
1611 case QL_VND_FCP_PRIO_CFG_CMD
:
1612 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job
);
1614 case QL_VND_READ_FLASH
:
1615 return qla2x00_read_optrom(bsg_job
);
1617 case QL_VND_UPDATE_FLASH
:
1618 return qla2x00_update_optrom(bsg_job
);
1620 case QL_VND_SET_FRU_VERSION
:
1621 return qla2x00_update_fru_versions(bsg_job
);
1623 case QL_VND_READ_FRU_STATUS
:
1624 return qla2x00_read_fru_status(bsg_job
);
1626 case QL_VND_WRITE_FRU_STATUS
:
1627 return qla2x00_write_fru_status(bsg_job
);
1630 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
1631 bsg_job
->job_done(bsg_job
);
1637 qla24xx_bsg_request(struct fc_bsg_job
*bsg_job
)
1640 struct fc_rport
*rport
;
1641 fc_port_t
*fcport
= NULL
;
1642 struct Scsi_Host
*host
;
1643 scsi_qla_host_t
*vha
;
1645 if (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
) {
1646 rport
= bsg_job
->rport
;
1647 fcport
= *(fc_port_t
**) rport
->dd_data
;
1648 host
= rport_to_shost(rport
);
1649 vha
= shost_priv(host
);
1651 host
= bsg_job
->shost
;
1652 vha
= shost_priv(host
);
1655 ql_dbg(ql_dbg_user
, vha
, 0x7000,
1656 "Entered %s msgcode=%d.\n", __func__
, bsg_job
->request
->msgcode
);
1658 switch (bsg_job
->request
->msgcode
) {
1659 case FC_BSG_RPT_ELS
:
1660 case FC_BSG_HST_ELS_NOLOGIN
:
1661 ret
= qla2x00_process_els(bsg_job
);
1664 ret
= qla2x00_process_ct(bsg_job
);
1666 case FC_BSG_HST_VENDOR
:
1667 ret
= qla2x00_process_vendor_specific(bsg_job
);
1669 case FC_BSG_HST_ADD_RPORT
:
1670 case FC_BSG_HST_DEL_RPORT
:
1673 ql_log(ql_log_warn
, vha
, 0x705a, "Unsupported BSG request.\n");
1680 qla24xx_bsg_timeout(struct fc_bsg_job
*bsg_job
)
1682 scsi_qla_host_t
*vha
= shost_priv(bsg_job
->shost
);
1683 struct qla_hw_data
*ha
= vha
->hw
;
1686 unsigned long flags
;
1687 struct req_que
*req
;
1688 struct srb_ctx
*sp_bsg
;
1690 /* find the bsg job from the active list of commands */
1691 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1692 for (que
= 0; que
< ha
->max_req_queues
; que
++) {
1693 req
= ha
->req_q_map
[que
];
1697 for (cnt
= 1; cnt
< MAX_OUTSTANDING_COMMANDS
; cnt
++) {
1698 sp
= req
->outstanding_cmds
[cnt
];
1702 if (((sp_bsg
->type
== SRB_CT_CMD
) ||
1703 (sp_bsg
->type
== SRB_ELS_CMD_HST
))
1704 && (sp_bsg
->u
.bsg_job
== bsg_job
)) {
1705 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1706 if (ha
->isp_ops
->abort_command(sp
)) {
1707 ql_log(ql_log_warn
, vha
, 0x7089,
1708 "mbx abort_command "
1710 bsg_job
->req
->errors
=
1711 bsg_job
->reply
->result
= -EIO
;
1713 ql_dbg(ql_dbg_user
, vha
, 0x708a,
1714 "mbx abort_command "
1716 bsg_job
->req
->errors
=
1717 bsg_job
->reply
->result
= 0;
1719 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1725 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1726 ql_log(ql_log_info
, vha
, 0x708b, "SRB not found to abort.\n");
1727 bsg_job
->req
->errors
= bsg_job
->reply
->result
= -ENXIO
;
1731 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1732 if (bsg_job
->request
->msgcode
== FC_BSG_HST_CT
)
1735 mempool_free(sp
, ha
->srb_mempool
);