/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

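/*
 * qla24xx_allocate_vp_id() - reserve a virtual-port index for a new vport.
 *
 * Scans ha->vp_idx_map for a free bit under ha->vport_lock, marks it used,
 * and links the vport onto ha->vp_list under ha->vport_slock so that
 * concurrent vp_list traversals stay safe.  Returns the chosen vp_id; a
 * value greater than ha->max_npiv_vports means no slot was available.
 */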
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id. */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		DEBUG15(printk("vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports));
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue).
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		msleep(500);

		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual host pointer.
 *
 * Return:
 *	None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable or delete, please make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		DEBUG15(printk("scsi(%ld): Marking port dead, "
		    "loop_id=0x%04x :%x\n",
		    vha->host_no, fcport->loop_id, fcport->vp_idx));

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

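/*
 * qla24xx_disable_vp() - take a virtual port offline.
 *
 * Asks the firmware to log out all of the vport's sessions, forces the
 * loop state down, marks every fcport on the vport dead, and reports the
 * resulting state (disabled or failed) to the FC transport layer.
 */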
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

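/*
 * qla24xx_enable_vp() - bring a virtual port online.
 *
 * Refuses to enable the vport unless the physical port is up and attached
 * to a fabric (F-port) topology, since NPIV needs fabric support; returns
 * 0 on success, non-zero otherwise with the fc_vport state set to match.
 */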
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Enabled\n", vha->vp_idx));
	return 0;

enable_failed:
	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Disabled\n", vha->vp_idx));
	return 1;
}

static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
	    vha->host_no, __func__));
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
		    "receiving of RSCN requests: 0x%x\n", ret));
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

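/*
 * qla2x00_alert_all_vps() - fan an asynchronous event out to every vport.
 *
 * Traverses ha->vp_list under ha->vport_slock; before dropping the lock to
 * deliver the event, it bumps vha->vref_count so the vport cannot be torn
 * down mid-delivery (qla24xx_deallocate_vp_id() waits for the count to
 * reach zero).
 */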
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				DEBUG15(printk("scsi(%ld)%s: Async_event for"
				    " VP[%d], mb = 0x%x, vha=%p\n",
				    vha->host_no, __func__, i, *mb, vha));
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first. Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
	    vha->host_no, vha->vp_idx));
	return qla24xx_enable_vp(vha);
}

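/*
 * qla2x00_do_dpc_vp() - the per-vport share of the DPC thread's work:
 * deferred port configuration, fcport updates, relogins and loop resync.
 */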
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		qla24xx_configure_vp(vha);
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
	}

	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
		    vha->host_no));
		qla2x00_relogin(vha);

		DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
		    vha->host_no));
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
		}
	}

	return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

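/*
 * qla24xx_vport_create_req_sanity_check() - validate a vport create request
 * before any resources are committed: initiator role only, NPIV support in
 * F/W and switch, a unique WWPN, and the max_npiv_vports limit.
 */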
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV. */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present. */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check that the WWPN is unique. */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max-npiv-supports limit. */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
		    "max_npiv_vports %ud.\n", base_vha->host_no,
		    ha->num_vhosts, ha->max_npiv_vports));
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

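/*
 * qla24xx_create_vhost() - allocate and initialize a new vport host.
 *
 * Creates a Scsi_Host for the vport, copies the WWNN/WWPN from the FC
 * transport request, reserves a vp_id, and inherits the base port's
 * request queue.  Returns the new vha, or NULL on failure.
 */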
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
		    vha->host_no));
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = MAX_TARGETS_2200;
	host->transportt = qla2xxx_transport_vport_template;

	DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
	    vha->host_no, vha));

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

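/*
 * Queue deletion: setting BIT_0 in the queue options before re-issuing the
 * queue-init mailbox command marks the operation as a delete request to
 * the firmware; only after the firmware reports success is the host-side
 * queue memory released.
 */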
static int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Couldn't delete req que %d\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Couldn't delete rsp que %d\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}

int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory "
		    "for request que\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - request_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
		    "additional request queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

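/*
 * qla_do_work() - workqueue handler that drains a response queue in
 * process context, under the hardware lock, on behalf of the base vha.
 */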
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory for"
		    " response que\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - response_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
		    "additional response queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		qla_printk(KERN_WARNING, ha, "msix not enabled\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters that cannot disable it */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}