/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}
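
/*
 * qla24xx_allocate_vp_id() - reserve a vp_idx slot for a new virtual port.
 * The vp_idx_map bitmap is searched under vport_lock; the new vport is then
 * added to the adapter-wide vp_list under vport_slock.
 */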
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}
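
/*
 * qla24xx_deallocate_vp_id() - release a vport's vp_idx slot.
 * Waits for vref_count to drop to zero so no vp_list traversal is still
 * referencing the vport before it is removed from the list.
 */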
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue).
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		msleep(500);

		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}
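
/*
 * qla24xx_find_vhost_by_name() - look up a vport by WWPN.
 * Returns the matching scsi_qla_host, or NULL if no vport uses port_name.
 */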
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}
/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * If this function is called in contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}
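
/*
 * qla24xx_enable_vp() - bring a virtual port online.
 * Fails early if the physical port is down or the current topology is not
 * fabric (ISP_CFG_F); otherwise pushes the VP configuration to the firmware.
 */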
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}
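
/*
 * qla24xx_configure_vp() - complete vport configuration once a VP index has
 * been acquired: enable RSCN reception (change request #3) and log the vport
 * into the fabric via qla24xx_configure_vhba().
 */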
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	}
	/* Corresponds to SCR enabled */
	clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
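
/*
 * qla2x00_alert_all_vps() - replay an asynchronous event mailbox to every
 * virtual port. vref_count is raised around each callback so the vport
 * cannot be deleted while vport_slock is dropped.
 */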
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
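
/*
 * qla2x00_vp_abort_isp() - vport-level ISP abort handling. The physical port
 * performs the real recovery; the vport is treated as a loop down, logged
 * out and then re-enabled.
 */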
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first. Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}
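
/*
 * qla2x00_do_dpc_vp() - per-vport deferred (DPC) work: queued work items,
 * VP configuration, fcport updates, relogins and loop resync, mirroring the
 * physical port's DPC processing.
 */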
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	ql_dbg(ql_dbg_dpc, vha, 0x4012,
	    "Entering %s.\n", __func__);
	ql_dbg(ql_dbg_dpc, vha, 0x4013,
	    "vp_flags: 0x%lx.\n", vha->vp_flags);

	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		ql_dbg(ql_dbg_dpc, vha, 0x4014,
		    "Configure VP scheduled.\n");
		qla24xx_configure_vp(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4015,
		    "Configure VP end.\n");
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {
		ql_dbg(ql_dbg_dpc, vha, 0x4018,
		    "Relogin needed scheduled.\n");
		qla2x00_relogin(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4019,
		    "Relogin needed end.\n");
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    !test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}
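
/*
 * qla2x00_do_dpc_all_vps() - run qla2x00_do_dpc_vp() for every vport on the
 * adapter; intended to be called from the base port's DPC context.
 */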
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
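
/*
 * qla24xx_vport_create_req_sanity_check() - validate an FC transport
 * vport-create request: initiator role only, NPIV support in F/W, H/W and
 * switch, a unique WWPN, and the max_npiv_vports limit.
 */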
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max-npiv-vports limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}

	return 0;
}
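
/*
 * qla24xx_create_vhost() - allocate and initialize a new virtual host for an
 * FC transport vport: Scsi_Host setup, vp_idx allocation, timer start, and
 * SCSI host parameters inherited from the base port.
 */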
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = MAX_TARGETS_2200;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}
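
/*
 * qla25xx_free_req_que() - release a request queue's DMA ring and its
 * queue-id slot in req_qid_map.
 */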
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
}
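
/*
 * qla25xx_free_rsp_que() - release a response queue: free its IRQ if one was
 * requested, then the DMA ring and its slot in rsp_qid_map.
 */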
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}
static int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}
static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}
/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00ea,
				    "Couldn't delete req que %d.\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00eb,
				    "Couldn't delete rsp que %d.\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);

	req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00df,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}
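
/*
 * qla_do_work() - workqueue handler that drains a response queue under
 * hardware_lock; scheduled per-queue via INIT_WORK() below.
 */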
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		ql_log(ql_log_warn, base_vha, 0x00e3,
		    "MSIX not enabled.\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters that are not NACK capable */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00e7,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	rsp->req = ha->req_q_map[req];

	qla2x00_init_response_q_entries(rsp);

	INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}