/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
13 /* BSG support for ELS/CT pass through */
15 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t
*vha
, fc_port_t
*fcport
, size_t size
)
18 struct qla_hw_data
*ha
= vha
->hw
;
21 sp
= mempool_alloc(ha
->srb_mempool
, GFP_KERNEL
);
24 ctx
= kzalloc(size
, GFP_KERNEL
);
26 mempool_free(sp
, ha
->srb_mempool
);
31 memset(sp
, 0, sizeof(*sp
));
39 qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg
*pri_cfg
, uint8_t flag
)
41 int i
, ret
, num_valid
;
43 struct qla_fcp_prio_entry
*pri_entry
;
44 uint32_t *bcode_val_ptr
, bcode_val
;
48 bcode
= (uint8_t *)pri_cfg
;
49 bcode_val_ptr
= (uint32_t *)pri_cfg
;
50 bcode_val
= (uint32_t)(*bcode_val_ptr
);
52 if (bcode_val
== 0xFFFFFFFF) {
53 /* No FCP Priority config data in flash */
54 DEBUG2(printk(KERN_INFO
55 "%s: No FCP priority config data.\n",
60 if (bcode
[0] != 'H' || bcode
[1] != 'Q' || bcode
[2] != 'O' ||
62 /* Invalid FCP priority data header*/
63 DEBUG2(printk(KERN_ERR
64 "%s: Invalid FCP Priority data header. bcode=0x%x\n",
65 __func__
, bcode_val
));
71 pri_entry
= &pri_cfg
->entry
[0];
72 for (i
= 0; i
< pri_cfg
->num_entries
; i
++) {
73 if (pri_entry
->flags
& FCP_PRIO_ENTRY_TAG_VALID
)
79 /* No valid FCP priority data entries */
80 DEBUG2(printk(KERN_ERR
81 "%s: No valid FCP Priority data entries.\n",
85 /* FCP priority data is valid */
86 DEBUG2(printk(KERN_INFO
87 "%s: Valid FCP priority data. num entries = %d\n",
88 __func__
, num_valid
));
95 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job
*bsg_job
)
97 struct Scsi_Host
*host
= bsg_job
->shost
;
98 scsi_qla_host_t
*vha
= shost_priv(host
);
99 struct qla_hw_data
*ha
= vha
->hw
;
104 bsg_job
->reply
->reply_payload_rcv_len
= 0;
106 if (!(IS_QLA24XX_TYPE(ha
) || IS_QLA25XX(ha
))) {
108 goto exit_fcp_prio_cfg
;
111 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
112 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
113 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
)) {
115 goto exit_fcp_prio_cfg
;
118 /* Get the sub command */
119 oper
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
121 /* Only set config is allowed if config memory is not allocated */
122 if (!ha
->fcp_prio_cfg
&& (oper
!= QLFC_FCP_PRIO_SET_CONFIG
)) {
124 goto exit_fcp_prio_cfg
;
127 case QLFC_FCP_PRIO_DISABLE
:
128 if (ha
->flags
.fcp_prio_enabled
) {
129 ha
->flags
.fcp_prio_enabled
= 0;
130 ha
->fcp_prio_cfg
->attributes
&=
131 ~FCP_PRIO_ATTR_ENABLE
;
132 qla24xx_update_all_fcp_prio(vha
);
133 bsg_job
->reply
->result
= DID_OK
;
136 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
137 goto exit_fcp_prio_cfg
;
141 case QLFC_FCP_PRIO_ENABLE
:
142 if (!ha
->flags
.fcp_prio_enabled
) {
143 if (ha
->fcp_prio_cfg
) {
144 ha
->flags
.fcp_prio_enabled
= 1;
145 ha
->fcp_prio_cfg
->attributes
|=
146 FCP_PRIO_ATTR_ENABLE
;
147 qla24xx_update_all_fcp_prio(vha
);
148 bsg_job
->reply
->result
= DID_OK
;
151 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
152 goto exit_fcp_prio_cfg
;
157 case QLFC_FCP_PRIO_GET_CONFIG
:
158 len
= bsg_job
->reply_payload
.payload_len
;
159 if (!len
|| len
> FCP_PRIO_CFG_SIZE
) {
161 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
162 goto exit_fcp_prio_cfg
;
165 bsg_job
->reply
->result
= DID_OK
;
166 bsg_job
->reply
->reply_payload_rcv_len
=
168 bsg_job
->reply_payload
.sg_list
,
169 bsg_job
->reply_payload
.sg_cnt
, ha
->fcp_prio_cfg
,
174 case QLFC_FCP_PRIO_SET_CONFIG
:
175 len
= bsg_job
->request_payload
.payload_len
;
176 if (!len
|| len
> FCP_PRIO_CFG_SIZE
) {
177 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
179 goto exit_fcp_prio_cfg
;
182 if (!ha
->fcp_prio_cfg
) {
183 ha
->fcp_prio_cfg
= vmalloc(FCP_PRIO_CFG_SIZE
);
184 if (!ha
->fcp_prio_cfg
) {
185 qla_printk(KERN_WARNING
, ha
,
186 "Unable to allocate memory "
187 "for fcp prio config data (%x).\n",
189 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
191 goto exit_fcp_prio_cfg
;
195 memset(ha
->fcp_prio_cfg
, 0, FCP_PRIO_CFG_SIZE
);
196 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
197 bsg_job
->request_payload
.sg_cnt
, ha
->fcp_prio_cfg
,
200 /* validate fcp priority data */
201 if (!qla24xx_fcp_prio_cfg_valid(
202 (struct qla_fcp_prio_cfg
*)
203 ha
->fcp_prio_cfg
, 1)) {
204 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
206 /* If buffer was invalidatic int
207 * fcp_prio_cfg is of no use
209 vfree(ha
->fcp_prio_cfg
);
210 ha
->fcp_prio_cfg
= NULL
;
211 goto exit_fcp_prio_cfg
;
214 ha
->flags
.fcp_prio_enabled
= 0;
215 if (ha
->fcp_prio_cfg
->attributes
& FCP_PRIO_ATTR_ENABLE
)
216 ha
->flags
.fcp_prio_enabled
= 1;
217 qla24xx_update_all_fcp_prio(vha
);
218 bsg_job
->reply
->result
= DID_OK
;
225 bsg_job
->job_done(bsg_job
);
229 qla2x00_process_els(struct fc_bsg_job
*bsg_job
)
231 struct fc_rport
*rport
;
232 fc_port_t
*fcport
= NULL
;
233 struct Scsi_Host
*host
;
234 scsi_qla_host_t
*vha
;
235 struct qla_hw_data
*ha
;
238 int req_sg_cnt
, rsp_sg_cnt
;
239 int rval
= (DRIVER_ERROR
<< 16);
240 uint16_t nextlid
= 0;
243 if (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
) {
244 rport
= bsg_job
->rport
;
245 fcport
= *(fc_port_t
**) rport
->dd_data
;
246 host
= rport_to_shost(rport
);
247 vha
= shost_priv(host
);
249 type
= "FC_BSG_RPT_ELS";
251 host
= bsg_job
->shost
;
252 vha
= shost_priv(host
);
254 type
= "FC_BSG_HST_ELS_NOLOGIN";
257 /* pass through is supported only for ISP 4Gb or higher */
258 if (!IS_FWI2_CAPABLE(ha
)) {
259 DEBUG2(qla_printk(KERN_INFO
, ha
,
260 "scsi(%ld):ELS passthru not supported for ISP23xx based "
261 "adapters\n", vha
->host_no
));
266 /* Multiple SG's are not supported for ELS requests */
267 if (bsg_job
->request_payload
.sg_cnt
> 1 ||
268 bsg_job
->reply_payload
.sg_cnt
> 1) {
269 DEBUG2(printk(KERN_INFO
270 "multiple SG's are not supported for ELS requests"
271 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
272 bsg_job
->request_payload
.sg_cnt
,
273 bsg_job
->reply_payload
.sg_cnt
));
278 /* ELS request for rport */
279 if (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
) {
280 /* make sure the rport is logged in,
281 * if not perform fabric login
283 if (qla2x00_fabric_login(vha
, fcport
, &nextlid
)) {
284 DEBUG2(qla_printk(KERN_WARNING
, ha
,
285 "failed to login port %06X for ELS passthru\n",
291 /* Allocate a dummy fcport structure, since functions
292 * preparing the IOCB and mailbox command retrieves port
293 * specific information from fcport structure. For Host based
294 * ELS commands there will be no fcport structure allocated
296 fcport
= qla2x00_alloc_fcport(vha
, GFP_KERNEL
);
302 /* Initialize all required fields of fcport */
304 fcport
->vp_idx
= vha
->vp_idx
;
305 fcport
->d_id
.b
.al_pa
=
306 bsg_job
->request
->rqst_data
.h_els
.port_id
[0];
307 fcport
->d_id
.b
.area
=
308 bsg_job
->request
->rqst_data
.h_els
.port_id
[1];
309 fcport
->d_id
.b
.domain
=
310 bsg_job
->request
->rqst_data
.h_els
.port_id
[2];
312 (fcport
->d_id
.b
.al_pa
== 0xFD) ?
313 NPH_FABRIC_CONTROLLER
: NPH_F_PORT
;
316 if (!vha
->flags
.online
) {
317 DEBUG2(qla_printk(KERN_WARNING
, ha
,
318 "host not online\n"));
324 dma_map_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
325 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
328 goto done_free_fcport
;
331 rsp_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
332 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
335 goto done_free_fcport
;
338 if ((req_sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) ||
339 (rsp_sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
)) {
340 DEBUG2(printk(KERN_INFO
341 "dma mapping resulted in different sg counts \
342 [request_sg_cnt: %x dma_request_sg_cnt: %x\
343 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
344 bsg_job
->request_payload
.sg_cnt
, req_sg_cnt
,
345 bsg_job
->reply_payload
.sg_cnt
, rsp_sg_cnt
));
350 /* Alloc SRB structure */
351 sp
= qla2x00_get_ctx_bsg_sp(vha
, fcport
, sizeof(struct srb_ctx
));
359 (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
?
360 SRB_ELS_CMD_RPT
: SRB_ELS_CMD_HST
);
362 (bsg_job
->request
->msgcode
== FC_BSG_RPT_ELS
?
363 "bsg_els_rpt" : "bsg_els_hst");
364 els
->u
.bsg_job
= bsg_job
;
366 DEBUG2(qla_printk(KERN_INFO
, ha
,
367 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
368 "portid=%02x%02x%02x.\n", vha
->host_no
, sp
->handle
, type
,
369 bsg_job
->request
->rqst_data
.h_els
.command_code
,
370 fcport
->loop_id
, fcport
->d_id
.b
.domain
, fcport
->d_id
.b
.area
,
371 fcport
->d_id
.b
.al_pa
));
373 rval
= qla2x00_start_sp(sp
);
374 if (rval
!= QLA_SUCCESS
) {
376 mempool_free(sp
, ha
->srb_mempool
);
383 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
384 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
385 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
386 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
387 goto done_free_fcport
;
390 if (bsg_job
->request
->msgcode
== FC_BSG_HST_ELS_NOLOGIN
)
397 qla2x00_process_ct(struct fc_bsg_job
*bsg_job
)
400 struct Scsi_Host
*host
= bsg_job
->shost
;
401 scsi_qla_host_t
*vha
= shost_priv(host
);
402 struct qla_hw_data
*ha
= vha
->hw
;
403 int rval
= (DRIVER_ERROR
<< 16);
404 int req_sg_cnt
, rsp_sg_cnt
;
406 struct fc_port
*fcport
;
407 char *type
= "FC_BSG_HST_CT";
411 dma_map_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
412 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
418 rsp_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
419 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
425 if ((req_sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) ||
426 (rsp_sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
)) {
427 DEBUG2(qla_printk(KERN_WARNING
, ha
,
428 "[request_sg_cnt: %x dma_request_sg_cnt: %x\
429 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
430 bsg_job
->request_payload
.sg_cnt
, req_sg_cnt
,
431 bsg_job
->reply_payload
.sg_cnt
, rsp_sg_cnt
));
436 if (!vha
->flags
.online
) {
437 DEBUG2(qla_printk(KERN_WARNING
, ha
,
438 "host not online\n"));
444 (bsg_job
->request
->rqst_data
.h_ct
.preamble_word1
& 0xFF000000)
448 loop_id
= cpu_to_le16(NPH_SNS
);
451 loop_id
= vha
->mgmt_svr_loop_id
;
454 DEBUG2(qla_printk(KERN_INFO
, ha
,
455 "Unknown loop id: %x\n", loop_id
));
460 /* Allocate a dummy fcport structure, since functions preparing the
461 * IOCB and mailbox command retrieves port specific information
462 * from fcport structure. For Host based ELS commands there will be
463 * no fcport structure allocated
465 fcport
= qla2x00_alloc_fcport(vha
, GFP_KERNEL
);
471 /* Initialize all required fields of fcport */
473 fcport
->vp_idx
= vha
->vp_idx
;
474 fcport
->d_id
.b
.al_pa
= bsg_job
->request
->rqst_data
.h_ct
.port_id
[0];
475 fcport
->d_id
.b
.area
= bsg_job
->request
->rqst_data
.h_ct
.port_id
[1];
476 fcport
->d_id
.b
.domain
= bsg_job
->request
->rqst_data
.h_ct
.port_id
[2];
477 fcport
->loop_id
= loop_id
;
479 /* Alloc SRB structure */
480 sp
= qla2x00_get_ctx_bsg_sp(vha
, fcport
, sizeof(struct srb_ctx
));
483 goto done_free_fcport
;
487 ct
->type
= SRB_CT_CMD
;
489 ct
->u
.bsg_job
= bsg_job
;
491 DEBUG2(qla_printk(KERN_INFO
, ha
,
492 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
493 "portid=%02x%02x%02x.\n", vha
->host_no
, sp
->handle
, type
,
494 (bsg_job
->request
->rqst_data
.h_ct
.preamble_word2
>> 16),
495 fcport
->loop_id
, fcport
->d_id
.b
.domain
, fcport
->d_id
.b
.area
,
496 fcport
->d_id
.b
.al_pa
));
498 rval
= qla2x00_start_sp(sp
);
499 if (rval
!= QLA_SUCCESS
) {
501 mempool_free(sp
, ha
->srb_mempool
);
503 goto done_free_fcport
;
510 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
511 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
512 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
513 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
518 /* Set the port configuration to enable the
519 * internal loopback on ISP81XX
522 qla81xx_set_internal_loopback(scsi_qla_host_t
*vha
, uint16_t *config
,
523 uint16_t *new_config
)
527 struct qla_hw_data
*ha
= vha
->hw
;
530 goto done_set_internal
;
532 new_config
[0] = config
[0] | (ENABLE_INTERNAL_LOOPBACK
<< 1);
533 memcpy(&new_config
[1], &config
[1], sizeof(uint16_t) * 3) ;
535 ha
->notify_dcbx_comp
= 1;
536 ret
= qla81xx_set_port_config(vha
, new_config
);
537 if (ret
!= QLA_SUCCESS
) {
538 DEBUG2(printk(KERN_ERR
539 "%s(%lu): Set port config failed\n",
540 __func__
, vha
->host_no
));
541 ha
->notify_dcbx_comp
= 0;
543 goto done_set_internal
;
546 /* Wait for DCBX complete event */
547 if (!wait_for_completion_timeout(&ha
->dcbx_comp
, (20 * HZ
))) {
548 DEBUG2(qla_printk(KERN_WARNING
, ha
,
549 "State change notificaition not received.\n"));
551 DEBUG2(qla_printk(KERN_INFO
, ha
,
552 "State change RECEIVED\n"));
554 ha
->notify_dcbx_comp
= 0;
560 /* Set the port configuration to disable the
561 * internal loopback on ISP81XX
564 qla81xx_reset_internal_loopback(scsi_qla_host_t
*vha
, uint16_t *config
,
569 uint16_t new_config
[4];
570 struct qla_hw_data
*ha
= vha
->hw
;
573 goto done_reset_internal
;
575 memset(new_config
, 0 , sizeof(new_config
));
576 if ((config
[0] & INTERNAL_LOOPBACK_MASK
) >> 1 ==
577 ENABLE_INTERNAL_LOOPBACK
) {
578 new_config
[0] = config
[0] & ~INTERNAL_LOOPBACK_MASK
;
579 memcpy(&new_config
[1], &config
[1], sizeof(uint16_t) * 3) ;
581 ha
->notify_dcbx_comp
= wait
;
582 ret
= qla81xx_set_port_config(vha
, new_config
);
583 if (ret
!= QLA_SUCCESS
) {
584 DEBUG2(printk(KERN_ERR
585 "%s(%lu): Set port config failed\n",
586 __func__
, vha
->host_no
));
587 ha
->notify_dcbx_comp
= 0;
589 goto done_reset_internal
;
592 /* Wait for DCBX complete event */
593 if (wait
&& !wait_for_completion_timeout(&ha
->dcbx_comp
,
595 DEBUG2(qla_printk(KERN_WARNING
, ha
,
596 "State change notificaition not received.\n"));
597 ha
->notify_dcbx_comp
= 0;
599 goto done_reset_internal
;
601 DEBUG2(qla_printk(KERN_INFO
, ha
,
602 "State change RECEIVED\n"));
604 ha
->notify_dcbx_comp
= 0;
611 qla2x00_process_loopback(struct fc_bsg_job
*bsg_job
)
613 struct Scsi_Host
*host
= bsg_job
->shost
;
614 scsi_qla_host_t
*vha
= shost_priv(host
);
615 struct qla_hw_data
*ha
= vha
->hw
;
617 uint8_t command_sent
;
619 struct msg_echo_lb elreq
;
620 uint16_t response
[MAILBOX_REGISTER_COUNT
];
621 uint16_t config
[4], new_config
[4];
623 uint8_t *req_data
= NULL
;
624 dma_addr_t req_data_dma
;
625 uint32_t req_data_len
;
626 uint8_t *rsp_data
= NULL
;
627 dma_addr_t rsp_data_dma
;
628 uint32_t rsp_data_len
;
630 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
631 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
632 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
))
635 if (!vha
->flags
.online
) {
636 DEBUG2(qla_printk(KERN_WARNING
, ha
, "host not online\n"));
640 elreq
.req_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
641 bsg_job
->request_payload
.sg_list
, bsg_job
->request_payload
.sg_cnt
,
644 if (!elreq
.req_sg_cnt
)
647 elreq
.rsp_sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
648 bsg_job
->reply_payload
.sg_list
, bsg_job
->reply_payload
.sg_cnt
,
651 if (!elreq
.rsp_sg_cnt
) {
653 goto done_unmap_req_sg
;
656 if ((elreq
.req_sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) ||
657 (elreq
.rsp_sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
)) {
658 DEBUG2(printk(KERN_INFO
659 "dma mapping resulted in different sg counts "
660 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
661 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
662 bsg_job
->request_payload
.sg_cnt
, elreq
.req_sg_cnt
,
663 bsg_job
->reply_payload
.sg_cnt
, elreq
.rsp_sg_cnt
));
667 req_data_len
= rsp_data_len
= bsg_job
->request_payload
.payload_len
;
668 req_data
= dma_alloc_coherent(&ha
->pdev
->dev
, req_data_len
,
669 &req_data_dma
, GFP_KERNEL
);
671 DEBUG2(printk(KERN_ERR
"%s: dma alloc for req_data "
672 "failed for host=%lu\n", __func__
, vha
->host_no
));
677 rsp_data
= dma_alloc_coherent(&ha
->pdev
->dev
, rsp_data_len
,
678 &rsp_data_dma
, GFP_KERNEL
);
680 DEBUG2(printk(KERN_ERR
"%s: dma alloc for rsp_data "
681 "failed for host=%lu\n", __func__
, vha
->host_no
));
683 goto done_free_dma_req
;
686 /* Copy the request buffer in req_data now */
687 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
688 bsg_job
->request_payload
.sg_cnt
, req_data
, req_data_len
);
690 elreq
.send_dma
= req_data_dma
;
691 elreq
.rcv_dma
= rsp_data_dma
;
692 elreq
.transfer_size
= req_data_len
;
694 elreq
.options
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
696 if ((ha
->current_topology
== ISP_CFG_F
||
698 le32_to_cpu(*(uint32_t *)req_data
) == ELS_OPCODE_BYTE
699 && req_data_len
== MAX_ELS_FRAME_PAYLOAD
)) &&
700 elreq
.options
== EXTERNAL_LOOPBACK
) {
701 type
= "FC_BSG_HST_VENDOR_ECHO_DIAG";
702 DEBUG2(qla_printk(KERN_INFO
, ha
,
703 "scsi(%ld) bsg rqst type: %s\n", vha
->host_no
, type
));
704 command_sent
= INT_DEF_LB_ECHO_CMD
;
705 rval
= qla2x00_echo_test(vha
, &elreq
, response
);
707 if (IS_QLA81XX(ha
)) {
708 memset(config
, 0, sizeof(config
));
709 memset(new_config
, 0, sizeof(new_config
));
710 if (qla81xx_get_port_config(vha
, config
)) {
711 DEBUG2(printk(KERN_ERR
712 "%s(%lu): Get port config failed\n",
713 __func__
, vha
->host_no
));
714 bsg_job
->reply
->reply_payload_rcv_len
= 0;
715 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
717 goto done_free_dma_req
;
720 if (elreq
.options
!= EXTERNAL_LOOPBACK
) {
721 DEBUG2(qla_printk(KERN_INFO
, ha
,
722 "Internal: current port config = %x\n",
724 if (qla81xx_set_internal_loopback(vha
, config
,
726 bsg_job
->reply
->reply_payload_rcv_len
=
728 bsg_job
->reply
->result
=
731 goto done_free_dma_req
;
734 /* For external loopback to work
735 * ensure internal loopback is disabled
737 if (qla81xx_reset_internal_loopback(vha
,
739 bsg_job
->reply
->reply_payload_rcv_len
=
741 bsg_job
->reply
->result
=
744 goto done_free_dma_req
;
748 type
= "FC_BSG_HST_VENDOR_LOOPBACK";
749 DEBUG2(qla_printk(KERN_INFO
, ha
,
750 "scsi(%ld) bsg rqst type: %s\n",
751 vha
->host_no
, type
));
753 command_sent
= INT_DEF_LB_LOOPBACK_CMD
;
754 rval
= qla2x00_loopback_test(vha
, &elreq
, response
);
757 /* Revert back to original port config
758 * Also clear internal loopback
760 qla81xx_reset_internal_loopback(vha
,
764 if (response
[0] == MBS_COMMAND_ERROR
&&
765 response
[1] == MBS_LB_RESET
) {
766 DEBUG2(printk(KERN_ERR
"%s(%ld): ABORTing "
767 "ISP\n", __func__
, vha
->host_no
));
768 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
769 qla2xxx_wake_dpc(vha
);
770 qla2x00_wait_for_chip_reset(vha
);
771 /* Also reset the MPI */
772 if (qla81xx_restart_mpi_firmware(vha
) !=
774 qla_printk(KERN_INFO
, ha
,
775 "MPI reset failed for host%ld.\n",
779 bsg_job
->reply
->reply_payload_rcv_len
= 0;
780 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
782 goto done_free_dma_req
;
785 type
= "FC_BSG_HST_VENDOR_LOOPBACK";
786 DEBUG2(qla_printk(KERN_INFO
, ha
,
787 "scsi(%ld) bsg rqst type: %s\n",
788 vha
->host_no
, type
));
789 command_sent
= INT_DEF_LB_LOOPBACK_CMD
;
790 rval
= qla2x00_loopback_test(vha
, &elreq
, response
);
795 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld) Vendor "
796 "request %s failed\n", vha
->host_no
, type
));
798 fw_sts_ptr
= ((uint8_t *)bsg_job
->req
->sense
) +
799 sizeof(struct fc_bsg_reply
);
801 memcpy(fw_sts_ptr
, response
, sizeof(response
));
802 fw_sts_ptr
+= sizeof(response
);
803 *fw_sts_ptr
= command_sent
;
805 bsg_job
->reply
->reply_payload_rcv_len
= 0;
806 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
808 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld) Vendor "
809 "request %s completed\n", vha
->host_no
, type
));
811 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
) +
812 sizeof(response
) + sizeof(uint8_t);
813 bsg_job
->reply
->reply_payload_rcv_len
=
814 bsg_job
->reply_payload
.payload_len
;
815 fw_sts_ptr
= ((uint8_t *)bsg_job
->req
->sense
) +
816 sizeof(struct fc_bsg_reply
);
817 memcpy(fw_sts_ptr
, response
, sizeof(response
));
818 fw_sts_ptr
+= sizeof(response
);
819 *fw_sts_ptr
= command_sent
;
820 bsg_job
->reply
->result
= DID_OK
;
821 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
822 bsg_job
->reply_payload
.sg_cnt
, rsp_data
,
825 bsg_job
->job_done(bsg_job
);
827 dma_free_coherent(&ha
->pdev
->dev
, rsp_data_len
,
828 rsp_data
, rsp_data_dma
);
830 dma_free_coherent(&ha
->pdev
->dev
, req_data_len
,
831 req_data
, req_data_dma
);
833 dma_unmap_sg(&ha
->pdev
->dev
,
834 bsg_job
->reply_payload
.sg_list
,
835 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
837 dma_unmap_sg(&ha
->pdev
->dev
,
838 bsg_job
->request_payload
.sg_list
,
839 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
844 qla84xx_reset(struct fc_bsg_job
*bsg_job
)
846 struct Scsi_Host
*host
= bsg_job
->shost
;
847 scsi_qla_host_t
*vha
= shost_priv(host
);
848 struct qla_hw_data
*ha
= vha
->hw
;
852 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
853 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
854 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
))
857 if (!IS_QLA84XX(ha
)) {
858 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld): Not 84xx, "
859 "exiting.\n", vha
->host_no
));
863 flag
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
865 rval
= qla84xx_reset_chip(vha
, flag
== A84_ISSUE_RESET_DIAG_FW
);
868 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld) Vendor "
869 "request 84xx reset failed\n", vha
->host_no
));
870 rval
= bsg_job
->reply
->reply_payload_rcv_len
= 0;
871 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
874 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld) Vendor "
875 "request 84xx reset completed\n", vha
->host_no
));
876 bsg_job
->reply
->result
= DID_OK
;
879 bsg_job
->job_done(bsg_job
);
884 qla84xx_updatefw(struct fc_bsg_job
*bsg_job
)
886 struct Scsi_Host
*host
= bsg_job
->shost
;
887 scsi_qla_host_t
*vha
= shost_priv(host
);
888 struct qla_hw_data
*ha
= vha
->hw
;
889 struct verify_chip_entry_84xx
*mn
= NULL
;
890 dma_addr_t mn_dma
, fw_dma
;
899 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
900 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
901 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
))
904 if (!IS_QLA84XX(ha
)) {
905 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld): Not 84xx, "
906 "exiting.\n", vha
->host_no
));
910 sg_cnt
= dma_map_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
911 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
915 if (sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) {
916 DEBUG2(printk(KERN_INFO
917 "dma mapping resulted in different sg counts "
918 "request_sg_cnt: %x dma_request_sg_cnt: %x ",
919 bsg_job
->request_payload
.sg_cnt
, sg_cnt
));
924 data_len
= bsg_job
->request_payload
.payload_len
;
925 fw_buf
= dma_alloc_coherent(&ha
->pdev
->dev
, data_len
,
926 &fw_dma
, GFP_KERNEL
);
928 DEBUG2(printk(KERN_ERR
"%s: dma alloc for fw_buf "
929 "failed for host=%lu\n", __func__
, vha
->host_no
));
934 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
935 bsg_job
->request_payload
.sg_cnt
, fw_buf
, data_len
);
937 mn
= dma_pool_alloc(ha
->s_dma_pool
, GFP_KERNEL
, &mn_dma
);
939 DEBUG2(printk(KERN_ERR
"%s: dma alloc for fw buffer "
940 "failed for host=%lu\n", __func__
, vha
->host_no
));
942 goto done_free_fw_buf
;
945 flag
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
946 fw_ver
= le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf
+ 2)));
948 memset(mn
, 0, sizeof(struct access_chip_84xx
));
949 mn
->entry_type
= VERIFY_CHIP_IOCB_TYPE
;
952 options
= VCO_FORCE_UPDATE
| VCO_END_OF_DATA
;
953 if (flag
== A84_ISSUE_UPDATE_DIAGFW_CMD
)
954 options
|= VCO_DIAG_FW
;
956 mn
->options
= cpu_to_le16(options
);
957 mn
->fw_ver
= cpu_to_le32(fw_ver
);
958 mn
->fw_size
= cpu_to_le32(data_len
);
959 mn
->fw_seq_size
= cpu_to_le32(data_len
);
960 mn
->dseg_address
[0] = cpu_to_le32(LSD(fw_dma
));
961 mn
->dseg_address
[1] = cpu_to_le32(MSD(fw_dma
));
962 mn
->dseg_length
= cpu_to_le32(data_len
);
963 mn
->data_seg_cnt
= cpu_to_le16(1);
965 rval
= qla2x00_issue_iocb_timeout(vha
, mn
, mn_dma
, 0, 120);
968 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld) Vendor "
969 "request 84xx updatefw failed\n", vha
->host_no
));
971 rval
= bsg_job
->reply
->reply_payload_rcv_len
= 0;
972 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
975 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld) Vendor "
976 "request 84xx updatefw completed\n", vha
->host_no
));
978 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
979 bsg_job
->reply
->result
= DID_OK
;
982 bsg_job
->job_done(bsg_job
);
983 dma_pool_free(ha
->s_dma_pool
, mn
, mn_dma
);
986 dma_free_coherent(&ha
->pdev
->dev
, data_len
, fw_buf
, fw_dma
);
989 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
990 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
996 qla84xx_mgmt_cmd(struct fc_bsg_job
*bsg_job
)
998 struct Scsi_Host
*host
= bsg_job
->shost
;
999 scsi_qla_host_t
*vha
= shost_priv(host
);
1000 struct qla_hw_data
*ha
= vha
->hw
;
1001 struct access_chip_84xx
*mn
= NULL
;
1002 dma_addr_t mn_dma
, mgmt_dma
;
1003 void *mgmt_b
= NULL
;
1005 struct qla_bsg_a84_mgmt
*ql84_mgmt
;
1007 uint32_t data_len
= 0;
1008 uint32_t dma_direction
= DMA_NONE
;
1010 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
1011 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
1012 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
))
1015 if (!IS_QLA84XX(ha
)) {
1016 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld): Not 84xx, "
1017 "exiting.\n", vha
->host_no
));
1021 ql84_mgmt
= (struct qla_bsg_a84_mgmt
*)((char *)bsg_job
->request
+
1022 sizeof(struct fc_bsg_request
));
1024 DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
1025 __func__
, vha
->host_no
));
1029 mn
= dma_pool_alloc(ha
->s_dma_pool
, GFP_KERNEL
, &mn_dma
);
1031 DEBUG2(printk(KERN_ERR
"%s: dma alloc for fw buffer "
1032 "failed for host=%lu\n", __func__
, vha
->host_no
));
1036 memset(mn
, 0, sizeof(struct access_chip_84xx
));
1037 mn
->entry_type
= ACCESS_CHIP_IOCB_TYPE
;
1038 mn
->entry_count
= 1;
1040 switch (ql84_mgmt
->mgmt
.cmd
) {
1041 case QLA84_MGMT_READ_MEM
:
1042 case QLA84_MGMT_GET_INFO
:
1043 sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
1044 bsg_job
->reply_payload
.sg_list
,
1045 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
1051 dma_direction
= DMA_FROM_DEVICE
;
1053 if (sg_cnt
!= bsg_job
->reply_payload
.sg_cnt
) {
1054 DEBUG2(printk(KERN_INFO
1055 "dma mapping resulted in different sg counts "
1056 "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
1057 bsg_job
->reply_payload
.sg_cnt
, sg_cnt
));
1062 data_len
= bsg_job
->reply_payload
.payload_len
;
1064 mgmt_b
= dma_alloc_coherent(&ha
->pdev
->dev
, data_len
,
1065 &mgmt_dma
, GFP_KERNEL
);
1067 DEBUG2(printk(KERN_ERR
"%s: dma alloc for mgmt_b "
1068 "failed for host=%lu\n",
1069 __func__
, vha
->host_no
));
1074 if (ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_READ_MEM
) {
1075 mn
->options
= cpu_to_le16(ACO_DUMP_MEMORY
);
1078 ql84_mgmt
->mgmt
.mgmtp
.u
.mem
.start_addr
);
1080 } else if (ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_GET_INFO
) {
1081 mn
->options
= cpu_to_le16(ACO_REQUEST_INFO
);
1083 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.info
.type
);
1087 ql84_mgmt
->mgmt
.mgmtp
.u
.info
.context
);
1091 case QLA84_MGMT_WRITE_MEM
:
1092 sg_cnt
= dma_map_sg(&ha
->pdev
->dev
,
1093 bsg_job
->request_payload
.sg_list
,
1094 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1101 dma_direction
= DMA_TO_DEVICE
;
1103 if (sg_cnt
!= bsg_job
->request_payload
.sg_cnt
) {
1104 DEBUG2(printk(KERN_INFO
1105 "dma mapping resulted in different sg counts "
1106 "request_sg_cnt: %x dma_request_sg_cnt: %x ",
1107 bsg_job
->request_payload
.sg_cnt
, sg_cnt
));
1112 data_len
= bsg_job
->request_payload
.payload_len
;
1113 mgmt_b
= dma_alloc_coherent(&ha
->pdev
->dev
, data_len
,
1114 &mgmt_dma
, GFP_KERNEL
);
1116 DEBUG2(printk(KERN_ERR
"%s: dma alloc for mgmt_b "
1117 "failed for host=%lu\n",
1118 __func__
, vha
->host_no
));
1123 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1124 bsg_job
->request_payload
.sg_cnt
, mgmt_b
, data_len
);
1126 mn
->options
= cpu_to_le16(ACO_LOAD_MEMORY
);
1128 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.mem
.start_addr
);
1131 case QLA84_MGMT_CHNG_CONFIG
:
1132 mn
->options
= cpu_to_le16(ACO_CHANGE_CONFIG_PARAM
);
1134 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.config
.id
);
1137 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.config
.param0
);
1140 cpu_to_le32(ql84_mgmt
->mgmt
.mgmtp
.u
.config
.param1
);
1148 if (ql84_mgmt
->mgmt
.cmd
!= QLA84_MGMT_CHNG_CONFIG
) {
1149 mn
->total_byte_cnt
= cpu_to_le32(ql84_mgmt
->mgmt
.len
);
1150 mn
->dseg_count
= cpu_to_le16(1);
1151 mn
->dseg_address
[0] = cpu_to_le32(LSD(mgmt_dma
));
1152 mn
->dseg_address
[1] = cpu_to_le32(MSD(mgmt_dma
));
1153 mn
->dseg_length
= cpu_to_le32(ql84_mgmt
->mgmt
.len
);
1156 rval
= qla2x00_issue_iocb(vha
, mn
, mn_dma
, 0);
1159 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld) Vendor "
1160 "request 84xx mgmt failed\n", vha
->host_no
));
1162 rval
= bsg_job
->reply
->reply_payload_rcv_len
= 0;
1163 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
1166 DEBUG2(qla_printk(KERN_WARNING
, ha
, "scsi(%ld) Vendor "
1167 "request 84xx mgmt completed\n", vha
->host_no
));
1169 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1170 bsg_job
->reply
->result
= DID_OK
;
1172 if ((ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_READ_MEM
) ||
1173 (ql84_mgmt
->mgmt
.cmd
== QLA84_MGMT_GET_INFO
)) {
1174 bsg_job
->reply
->reply_payload_rcv_len
=
1175 bsg_job
->reply_payload
.payload_len
;
1177 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
1178 bsg_job
->reply_payload
.sg_cnt
, mgmt_b
,
1183 bsg_job
->job_done(bsg_job
);
1187 dma_free_coherent(&ha
->pdev
->dev
, data_len
, mgmt_b
, mgmt_dma
);
1189 if (dma_direction
== DMA_TO_DEVICE
)
1190 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->request_payload
.sg_list
,
1191 bsg_job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1192 else if (dma_direction
== DMA_FROM_DEVICE
)
1193 dma_unmap_sg(&ha
->pdev
->dev
, bsg_job
->reply_payload
.sg_list
,
1194 bsg_job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
1197 dma_pool_free(ha
->s_dma_pool
, mn
, mn_dma
);
1203 qla24xx_iidma(struct fc_bsg_job
*bsg_job
)
1205 struct Scsi_Host
*host
= bsg_job
->shost
;
1206 scsi_qla_host_t
*vha
= shost_priv(host
);
1207 struct qla_hw_data
*ha
= vha
->hw
;
1209 struct qla_port_param
*port_param
= NULL
;
1210 fc_port_t
*fcport
= NULL
;
1211 uint16_t mb
[MAILBOX_REGISTER_COUNT
];
1212 uint8_t *rsp_ptr
= NULL
;
1214 bsg_job
->reply
->reply_payload_rcv_len
= 0;
1216 if (test_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
) ||
1217 test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
) ||
1218 test_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
))
1221 if (!IS_IIDMA_CAPABLE(vha
->hw
)) {
1222 DEBUG2(qla_printk(KERN_WARNING
, ha
, "%s(%lu): iiDMA not "
1223 "supported\n", __func__
, vha
->host_no
));
1227 port_param
= (struct qla_port_param
*)((char *)bsg_job
->request
+
1228 sizeof(struct fc_bsg_request
));
1230 DEBUG2(printk("%s(%ld): port_param header not provided, "
1231 "exiting.\n", __func__
, vha
->host_no
));
1235 if (port_param
->fc_scsi_addr
.dest_type
!= EXT_DEF_TYPE_WWPN
) {
1236 DEBUG2(printk(KERN_ERR
"%s(%ld): Invalid destination type\n",
1237 __func__
, vha
->host_no
));
1241 list_for_each_entry(fcport
, &vha
->vp_fcports
, list
) {
1242 if (fcport
->port_type
!= FCT_TARGET
)
1245 if (memcmp(port_param
->fc_scsi_addr
.dest_addr
.wwpn
,
1246 fcport
->port_name
, sizeof(fcport
->port_name
)))
1252 DEBUG2(printk(KERN_ERR
"%s(%ld): Failed to find port\n",
1253 __func__
, vha
->host_no
));
1257 if (atomic_read(&fcport
->state
) != FCS_ONLINE
) {
1258 DEBUG2(printk(KERN_ERR
"%s(%ld): Port not online\n",
1259 __func__
, vha
->host_no
));
1263 if (fcport
->flags
& FCF_LOGIN_NEEDED
) {
1264 DEBUG2(printk(KERN_ERR
"%s(%ld): Remote port not logged in, "
1266 __func__
, vha
->host_no
, fcport
->flags
));
1270 if (port_param
->mode
)
1271 rval
= qla2x00_set_idma_speed(vha
, fcport
->loop_id
,
1272 port_param
->speed
, mb
);
1274 rval
= qla2x00_get_idma_speed(vha
, fcport
->loop_id
,
1275 &port_param
->speed
, mb
);
1278 DEBUG16(printk(KERN_ERR
"scsi(%ld): iIDMA cmd failed for "
1279 "%02x%02x%02x%02x%02x%02x%02x%02x -- "
1280 "%04x %x %04x %04x.\n",
1281 vha
->host_no
, fcport
->port_name
[0],
1282 fcport
->port_name
[1],
1283 fcport
->port_name
[2], fcport
->port_name
[3],
1284 fcport
->port_name
[4], fcport
->port_name
[5],
1285 fcport
->port_name
[6], fcport
->port_name
[7], rval
,
1286 fcport
->fp_speed
, mb
[0], mb
[1]));
1288 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
1291 if (!port_param
->mode
) {
1292 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
) +
1293 sizeof(struct qla_port_param
);
1295 rsp_ptr
= ((uint8_t *)bsg_job
->reply
) +
1296 sizeof(struct fc_bsg_reply
);
1298 memcpy(rsp_ptr
, port_param
,
1299 sizeof(struct qla_port_param
));
1302 bsg_job
->reply
->result
= DID_OK
;
1305 bsg_job
->job_done(bsg_job
);
1310 qla2x00_optrom_setup(struct fc_bsg_job
*bsg_job
, struct qla_hw_data
*ha
,
1316 bsg_job
->reply
->reply_payload_rcv_len
= 0;
1318 if (unlikely(pci_channel_offline(ha
->pdev
)))
1321 start
= bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[1];
1322 if (start
> ha
->optrom_size
)
1325 if (ha
->optrom_state
!= QLA_SWAITING
)
1328 ha
->optrom_region_start
= start
;
1331 if (ha
->optrom_size
== OPTROM_SIZE_2300
&& start
== 0)
1333 else if (start
== (ha
->flt_region_boot
* 4) ||
1334 start
== (ha
->flt_region_fw
* 4))
1336 else if (IS_QLA24XX_TYPE(ha
) || IS_QLA25XX(ha
) ||
1337 IS_QLA8XXX_TYPE(ha
))
1340 qla_printk(KERN_WARNING
, ha
,
1341 "Invalid start region 0x%x/0x%x.\n",
1342 start
, bsg_job
->request_payload
.payload_len
);
1346 ha
->optrom_region_size
= start
+
1347 bsg_job
->request_payload
.payload_len
> ha
->optrom_size
?
1348 ha
->optrom_size
- start
:
1349 bsg_job
->request_payload
.payload_len
;
1350 ha
->optrom_state
= QLA_SWRITING
;
1352 ha
->optrom_region_size
= start
+
1353 bsg_job
->reply_payload
.payload_len
> ha
->optrom_size
?
1354 ha
->optrom_size
- start
:
1355 bsg_job
->reply_payload
.payload_len
;
1356 ha
->optrom_state
= QLA_SREADING
;
1359 ha
->optrom_buffer
= vmalloc(ha
->optrom_region_size
);
1360 if (!ha
->optrom_buffer
) {
1361 qla_printk(KERN_WARNING
, ha
,
1362 "Read: Unable to allocate memory for optrom retrieval "
1363 "(%x).\n", ha
->optrom_region_size
);
1365 ha
->optrom_state
= QLA_SWAITING
;
1369 memset(ha
->optrom_buffer
, 0, ha
->optrom_region_size
);
1374 qla2x00_read_optrom(struct fc_bsg_job
*bsg_job
)
1376 struct Scsi_Host
*host
= bsg_job
->shost
;
1377 scsi_qla_host_t
*vha
= shost_priv(host
);
1378 struct qla_hw_data
*ha
= vha
->hw
;
1381 rval
= qla2x00_optrom_setup(bsg_job
, ha
, 0);
1385 ha
->isp_ops
->read_optrom(vha
, ha
->optrom_buffer
,
1386 ha
->optrom_region_start
, ha
->optrom_region_size
);
1388 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
1389 bsg_job
->reply_payload
.sg_cnt
, ha
->optrom_buffer
,
1390 ha
->optrom_region_size
);
1392 bsg_job
->reply
->reply_payload_rcv_len
= ha
->optrom_region_size
;
1393 bsg_job
->reply
->result
= DID_OK
;
1394 vfree(ha
->optrom_buffer
);
1395 ha
->optrom_buffer
= NULL
;
1396 ha
->optrom_state
= QLA_SWAITING
;
1397 bsg_job
->job_done(bsg_job
);
1402 qla2x00_update_optrom(struct fc_bsg_job
*bsg_job
)
1404 struct Scsi_Host
*host
= bsg_job
->shost
;
1405 scsi_qla_host_t
*vha
= shost_priv(host
);
1406 struct qla_hw_data
*ha
= vha
->hw
;
1409 rval
= qla2x00_optrom_setup(bsg_job
, ha
, 1);
1413 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1414 bsg_job
->request_payload
.sg_cnt
, ha
->optrom_buffer
,
1415 ha
->optrom_region_size
);
1417 ha
->isp_ops
->write_optrom(vha
, ha
->optrom_buffer
,
1418 ha
->optrom_region_start
, ha
->optrom_region_size
);
1420 bsg_job
->reply
->result
= DID_OK
;
1421 vfree(ha
->optrom_buffer
);
1422 ha
->optrom_buffer
= NULL
;
1423 ha
->optrom_state
= QLA_SWAITING
;
1424 bsg_job
->job_done(bsg_job
);
1429 qla2x00_process_vendor_specific(struct fc_bsg_job
*bsg_job
)
1431 switch (bsg_job
->request
->rqst_data
.h_vendor
.vendor_cmd
[0]) {
1432 case QL_VND_LOOPBACK
:
1433 return qla2x00_process_loopback(bsg_job
);
1435 case QL_VND_A84_RESET
:
1436 return qla84xx_reset(bsg_job
);
1438 case QL_VND_A84_UPDATE_FW
:
1439 return qla84xx_updatefw(bsg_job
);
1441 case QL_VND_A84_MGMT_CMD
:
1442 return qla84xx_mgmt_cmd(bsg_job
);
1445 return qla24xx_iidma(bsg_job
);
1447 case QL_VND_FCP_PRIO_CFG_CMD
:
1448 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job
);
1450 case QL_VND_READ_FLASH
:
1451 return qla2x00_read_optrom(bsg_job
);
1453 case QL_VND_UPDATE_FLASH
:
1454 return qla2x00_update_optrom(bsg_job
);
1457 bsg_job
->reply
->result
= (DID_ERROR
<< 16);
1458 bsg_job
->job_done(bsg_job
);
1464 qla24xx_bsg_request(struct fc_bsg_job
*bsg_job
)
1468 switch (bsg_job
->request
->msgcode
) {
1469 case FC_BSG_RPT_ELS
:
1470 case FC_BSG_HST_ELS_NOLOGIN
:
1471 ret
= qla2x00_process_els(bsg_job
);
1474 ret
= qla2x00_process_ct(bsg_job
);
1476 case FC_BSG_HST_VENDOR
:
1477 ret
= qla2x00_process_vendor_specific(bsg_job
);
1479 case FC_BSG_HST_ADD_RPORT
:
1480 case FC_BSG_HST_DEL_RPORT
:
1483 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
1490 qla24xx_bsg_timeout(struct fc_bsg_job
*bsg_job
)
1492 scsi_qla_host_t
*vha
= shost_priv(bsg_job
->shost
);
1493 struct qla_hw_data
*ha
= vha
->hw
;
1496 unsigned long flags
;
1497 struct req_que
*req
;
1498 struct srb_ctx
*sp_bsg
;
1500 /* find the bsg job from the active list of commands */
1501 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1502 for (que
= 0; que
< ha
->max_req_queues
; que
++) {
1503 req
= ha
->req_q_map
[que
];
1507 for (cnt
= 1; cnt
< MAX_OUTSTANDING_COMMANDS
; cnt
++) {
1508 sp
= req
->outstanding_cmds
[cnt
];
1512 if (((sp_bsg
->type
== SRB_CT_CMD
) ||
1513 (sp_bsg
->type
== SRB_ELS_CMD_HST
))
1514 && (sp_bsg
->u
.bsg_job
== bsg_job
)) {
1515 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1516 if (ha
->isp_ops
->abort_command(sp
)) {
1517 DEBUG2(qla_printk(KERN_INFO
, ha
,
1519 "abort_command failed\n",
1521 bsg_job
->req
->errors
=
1522 bsg_job
->reply
->result
= -EIO
;
1524 DEBUG2(qla_printk(KERN_INFO
, ha
,
1526 "abort_command success\n",
1528 bsg_job
->req
->errors
=
1529 bsg_job
->reply
->result
= 0;
1531 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1537 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1538 DEBUG2(qla_printk(KERN_INFO
, ha
,
1539 "scsi(%ld) SRB not found to abort\n", vha
->host_no
));
1540 bsg_job
->req
->errors
= bsg_job
->reply
->result
= -ENXIO
;
1544 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1545 if (bsg_job
->request
->msgcode
== FC_BSG_HST_CT
)
1548 mempool_free(sp
, ha
->srb_mempool
);