/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>
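
/*
 * qla2x00_vp_stop_timer
 *	Stop the timer of a virtual port; the physical port's timer is
 *	left running.
 */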
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}
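
/*
 * qla24xx_allocate_vp_id
 *	Find a free slot in the vp_idx bitmap, claim it for this vport
 *	and link the vport into the adapter's vp_list.
 */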
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		DEBUG15(printk("vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports));
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;
	list_add_tail(&vha->list, &ha->vp_list);
	mutex_unlock(&ha->vport_lock);
	return vp_id;
}
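
/*
 * qla24xx_deallocate_vp_id
 *	Release this vport's vp_idx back to the bitmap and unlink the
 *	vport from the adapter's vp_list.
 */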
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->vport_lock);
	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);
	list_del(&vha->list);
	mutex_unlock(&ha->vport_lock);
}
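
/*
 * qla24xx_find_vhost_by_name
 *	Return the vport whose WWPN matches port_name, or NULL if none
 *	is found.
 */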
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;

	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE))
			return vha;
	}
	return NULL;
}
/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual adapter block pointer.
 *
 * Return:
 *	None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		DEBUG15(printk("scsi(%ld): Marking port dead, "
		    "loop_id=0x%04x :%x\n",
		    vha->host_no, fcport->loop_id, fcport->vp_idx));

		atomic_set(&fcport->state, FCS_DEVICE_DEAD);
		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		atomic_set(&fcport->state, FCS_UNCONFIGURED);
	}
}
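
/*
 * qla24xx_disable_vp
 *	Log out and disable a virtual port, marking all of its fcports
 *	dead and reporting the resulting state to the FC transport.
 */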
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}
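
/*
 * qla24xx_enable_vp
 *	Enable a virtual port. Fails if the physical port is down or if
 *	the firmware rejects the vport configuration.
 */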
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Enabled\n", vha->vp_idx));
	return 0;

enable_failed:
	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Disabled\n", vha->vp_idx));
	return 1;
}
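
/*
 * qla24xx_configure_vp
 *	Complete vport configuration: enable RSCN delivery (change
 *	request #3) and bring the vport to the ACTIVE state.
 */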
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
	    vha->host_no, __func__));
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
		    "receiving of RSCN requests: 0x%x\n", ret));
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
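
/*
 * qla2x00_alert_all_vps
 *	Fan a loop- or port-related async event out to every virtual
 *	port on the adapter.
 */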
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha, *tvha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;

	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				DEBUG15(printk("scsi(%ld)%s: Async_event for"
				    " VP[%d], mb = 0x%x, vha=%p\n",
				    vha->host_no, __func__, i, *mb, vha));
				qla2x00_async_event(vha, rsp, mb);
				break;
			}
		}
		i++;
	}
}
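
/*
 * qla2x00_vp_abort_isp
 *	Vport-level ISP abort handling: treat the abort as a loop down,
 *	log the vport out and schedule its re-enable.
 */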
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first. Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
	    vha->host_no, vha->vp_idx));
	return qla24xx_enable_vp(vha);
}
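
/*
 * qla2x00_do_dpc_vp
 *	Per-vport deferred-work processing: complete port configuration,
 *	update fcports, handle relogins, reset markers and loop resync,
 *	as flagged in dpc_flags.
 */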
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		qla24xx_configure_vp(vha);
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
	}

	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
		    vha->host_no));
		qla2x00_relogin(vha);

		DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
		    vha->host_no));
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
		}
	}

	return 0;
}
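
/*
 * qla2x00_do_dpc_all_vps
 *	Run deferred-work processing for every vport on the adapter.
 *	Only meaningful on the physical port in a fabric topology.
 */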
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	struct scsi_qla_host *tvp;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		if (vp->vp_idx)
			ret = qla2x00_do_dpc_vp(vp);
	}
}
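
/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate an FC transport vport-create request: initiator role
 *	only, NPIV support in F/W, H/W and fabric, a unique WWPN and a
 *	free vport slot. Returns 0 on success or a VPCERR_* code.
 */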
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max number of NPIV ports supported */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): num_vhosts %u is bigger than "
		    "max_npiv_vports %u.\n", base_vha->host_no,
		    ha->num_vhosts, ha->max_npiv_vports));
		return VPCERR_UNSUPPORTED;
	}

	return 0;
}
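
/*
 * qla24xx_create_vhost
 *	Allocate and initialize a scsi_qla_host for a new vport: assign
 *	a vp_idx, seed the Scsi_Host limits from the base port and start
 *	the vport timer. Returns the new vha, or NULL on failure.
 */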
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
		    vha->host_no));
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = MAX_LUNS;
	host->unique_id = host->host_no;
	host->max_id = MAX_TARGETS_2200;
	host->transportt = qla2xxx_transport_vport_template;

	DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
	    vha->host_no, vha));

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}
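
/*
 * qla25xx_free_req_que
 *	Free a request queue's DMA ring and release its queue id.
 */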
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
}
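
/*
 * qla25xx_free_rsp_que
 *	Free a response queue's IRQ and DMA ring, and release its
 *	queue id.
 */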
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}
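
/*
 * qla25xx_delete_req_que
 *	Tear the queue down in firmware (option BIT_0 marks the queue
 *	for deletion), then free its host-side resources.
 */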
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}
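
/*
 * qla25xx_update_req_que
 *	Modify an existing request queue's QoS setting (option BIT_3
 *	selects a queue update), then restore the option bit.
 */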
int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
{
	int ret = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[que];

	req->options |= BIT_3;
	req->qos = qos;
	ret = qla25xx_init_req_que(vha, req);
	if (ret != QLA_SUCCESS)
		DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));
	/* restore options bit */
	req->options &= ~BIT_3;
	req->qos = 0;

	return ret;
}
/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Couldn't delete req que %d\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Couldn't delete rsp que %d\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory "
		    "for request que\n");
		goto que_failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - request_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
		    "additional request queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	if (req)
		qla25xx_free_req_que(base_vha, req);
	return 0;
}
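
/*
 * qla_do_work
 *	Work-queue handler: drain a response queue under the hardware
 *	lock.
 */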
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}
/* Create a response queue; returns the new queue id, or 0 on failure. */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory for"
		    " response que\n");
		goto que_failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - response_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
		    "additional response queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		qla_printk(KERN_WARNING, ha, "msix not enabled\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters without NACK capability */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);

	INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	if (rsp)
		qla25xx_free_rsp_que(base_vha, rsp);
	return 0;
}
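
/*
 * qla25xx_create_queues
 *	Create a response/request queue pair for a vport, passing the
 *	requested QoS through to the request queue. Returns the request
 *	queue id, or 0 on failure.
 */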
int
qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
{
	uint16_t options = 0;
	uint8_t ret = 0;
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp = NULL;

	options |= BIT_1;
	ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
	if (!ret) {
		qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
		return ret;
	} else
		qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
	rsp = ha->rsp_q_map[ret];

	options = 0;
	if (qos & BIT_7)
		options |= BIT_8;
	ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
	    qos & ~BIT_7);
	if (ret) {
		vha->req = ha->req_q_map[ret];
		qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
	} else
		qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
	rsp->req = ha->req_q_map[ret];

	return ret;
}