/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
        if (vha->vp_idx && vha->timer_active) {
                del_timer_sync(&vha->timer);
                vha->timer_active = 0;
        }
}
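/*
 * qla24xx_allocate_vp_id
 *	Finds a free index in ha->vp_idx_map under vport_lock, marks it in
 *	use, and links the new vport onto ha->vp_list under vport_slock.
 */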
uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        /* Find an empty slot and assign a vp_id */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
        if (vp_id > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa000,
                    "vp_id %d is bigger than max-supported %d.\n",
                    vp_id, ha->max_npiv_vports);
                mutex_unlock(&ha->vport_lock);
                return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);

        qlt_update_vp_map(vha, SET_VP_IDX);

        spin_unlock_irqrestore(&ha->vport_slock, flags);

        mutex_unlock(&ha->vport_lock);
        return vp_id;
}
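/*
 * qla24xx_deallocate_vp_id
 *	Waits for outstanding references (vref_count) to drain, removes the
 *	vport from the target VP map and ha->vp_list, and releases its bit
 *	in ha->vp_idx_map.
 */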
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;

        mutex_lock(&ha->vport_lock);
        /*
         * Wait for all pending activities to finish before removing vport
         * from the list.
         * Lock needs to be held for safe removal from the list (it
         * ensures no active vp_list traversal while the vport is removed
         * from the queue).
         */
        wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
            10*HZ);

        spin_lock_irqsave(&ha->vport_slock, flags);
        if (atomic_read(&vha->vref_count)) {
                ql_dbg(ql_dbg_vport, vha, 0xfffa,
                    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
                vha->vref_count = (atomic_t)ATOMIC_INIT(0);
        }
        list_del(&vha->list);
        qlt_update_vp_map(vha, RESET_VP_IDX);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);

        mutex_unlock(&ha->vport_lock);
}
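/*
 * qla24xx_find_vhost_by_name
 *	Walks ha->vp_list under vport_slock and returns the vport whose
 *	WWPN matches port_name, or NULL if no match is found.
 */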
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
                }
        }

        spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
}
/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual host adapter pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
        /*
         * !!! NOTE !!!
         * If this function is called in contexts other than vp create,
         * disable, or delete, make sure it is synchronized with the
         * delete thread.
         */
        fc_port_t *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                ql_dbg(ql_dbg_vport, vha, 0xa001,
                    "Marking port dead, loop_id=0x%04x : %x.\n",
                    fcport->loop_id, fcport->vha->vp_idx);

                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        }
}
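/*
 * qla24xx_disable_vp
 *	Logs the vport out of the fabric, marks its devices lost, and
 *	reports the resulting state to the FC transport layer.
 */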
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
        unsigned long flags;
        int ret;

        ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->vport_slock, flags);
        qlt_update_vp_map(vha, RESET_AL_PA);
        spin_unlock_irqrestore(&vha->hw->vport_slock, flags);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                return -1;
        }
        return 0;
}
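/*
 * qla24xx_enable_vp
 *	Enables the virtual port once the physical port is up on a fabric
 *	topology; returns 0 on success, 1 on failure.
 */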
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        /* Check if physical ha port is Up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
            !(ha->current_topology & ISP_CFG_F)) {
                vha->vp_err_state = VP_ERR_PORTDWN;
                fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
                goto enable_failed;
        }

        /* Initialize the new vport unless it is a persistent port */
        mutex_lock(&ha->vport_lock);
        ret = qla24xx_modify_vp_config(vha);
        mutex_unlock(&ha->vport_lock);

        if (ret != QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                goto enable_failed;
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801a,
            "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
        return 0;

enable_failed:
        ql_dbg(ql_dbg_taskm, vha, 0x801b,
            "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
        return 1;
}
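/*
 * qla24xx_configure_vp
 *	Issues change request #3 so the vport receives RSCNs, then brings
 *	the vport online and reports it active to the FC transport.
 */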
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
        struct fc_vport *fc_vport;
        int ret;

        fc_vport = vha->fc_vport;

        ql_dbg(ql_dbg_vport, vha, 0xa002,
            "%s: change request #3.\n", __func__);
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
                ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
                    "receiving of RSCN requests: 0x%x.\n", ret);
                return;
        } else {
                /* Corresponds to SCR enabled */
                clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
        }

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
                return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
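/*
 * qla2x00_alert_all_vps
 *	Forwards selected asynchronous events from the physical port to
 *	every virtual port on ha->vp_list, holding a vref on each vport
 *	while the event is processed.
 */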
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
                        case MBA_LOOP_DOWN:
                        case MBA_LIP_RESET:
                        case MBA_POINT_TO_POINT:
                        case MBA_CHG_IN_CONNECTION:
                        case MBA_PORT_UPDATE:
                        case MBA_RSCN_UPDATE:
                                ql_dbg(ql_dbg_async, vha, 0x5024,
                                    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
                                    i, *mb, vha);
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
                        wake_up(&vha->vref_waitq);
                }
                i++;
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}
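/*
 * qla2x00_vp_abort_isp
 *	Vport-level ISP abort handling: treat the abort as a loop down,
 *	log the vport out, and schedule it to be re-enabled.
 */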
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
        /*
         * Physical port will do most of the abort and recovery work. We can
         * just treat it as a loop down.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        /*
         * To exclusively reset vport, we need to log it out first. Note: this
         * control_vp can fail if ISP reset is already issued, this is
         * expected, as the vp would be already logged out due to ISP reset.
         */
        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);
        return qla24xx_enable_vp(vha);
}
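/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC work: deferred VP configuration, fcport updates,
 *	relogins, and loop resync, driven by vp_flags/dpc_flags bits.
 */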
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
            "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

        qla2x00_do_work(vha);

        /* Check if Fw is ready to configure VP first */
        if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
                if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
                        /* VP acquired. complete port configuration */
                        ql_dbg(ql_dbg_dpc, vha, 0x4014,
                            "Configure VP scheduled.\n");
                        qla24xx_configure_vp(vha);
                        ql_dbg(ql_dbg_dpc, vha, 0x4015,
                            "Configure VP end.\n");
                        return 0;
                }
        }

        if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, vha, 0x4016,
                    "FCPort update scheduled.\n");
                qla2x00_update_fcports(vha);
                clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
                ql_dbg(ql_dbg_dpc, vha, 0x4017,
                    "FCPort update end.\n");
        }

        if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
            !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
            atomic_read(&vha->loop_state) != LOOP_DOWN) {

                ql_dbg(ql_dbg_dpc, vha, 0x4018,
                    "Relogin needed scheduled.\n");
                qla2x00_relogin(vha);
                ql_dbg(ql_dbg_dpc, vha, 0x4019,
                    "Relogin needed end.\n");
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
                clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
                        ql_dbg(ql_dbg_dpc, vha, 0x401a,
                            "Loop resync scheduled.\n");
                        qla2x00_loop_resync(vha);
                        clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
                        ql_dbg(ql_dbg_dpc, vha, 0x401b,
                            "Loop resync end.\n");
                }
        }

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
            "Exiting %s.\n", __func__);
        return 0;
}
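/*
 * qla2x00_do_dpc_all_vps
 *	Called from the base port's DPC thread; runs qla2x00_do_dpc_vp()
 *	for each vport on ha->vp_list while holding a vref on it.
 */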
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        unsigned long flags = 0;

        if (vha->vp_idx)
                return;
        if (list_empty(&ha->vp_list))
                return;

        clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

        if (!(ha->current_topology & ISP_CFG_F))
                return;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vp, &ha->vp_list, list) {
                if (vp->vp_idx) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        qla2x00_do_dpc_vp(vp);

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vp->vref_count);
                        wake_up(&vp->vref_waitq);
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}
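/*
 * qla24xx_vport_create_req_sanity_check
 *	Validates a vport create request: initiator role, NPIV support in
 *	F/W and fabric, a unique WWPN, and the NPIV vport limit.
 */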
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        uint8_t port_name[WWN_SIZE];

        if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
                return VPCERR_UNSUPPORTED;

        /* Check that the F/W and H/W support NPIV */
        if (!ha->flags.npiv_supported)
                return VPCERR_UNSUPPORTED;

        /* Check whether an NPIV-capable switch is present */
        if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
                return VPCERR_NO_FABRIC_SUPP;

        /* Check that the WWPN is unique */
        u64_to_wwn(fc_vport->port_name, port_name);
        if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
                return VPCERR_BAD_WWN;
        vha = qla24xx_find_vhost_by_name(ha, port_name);
        if (vha)
                return VPCERR_BAD_WWN;

        /* Check the max-npiv-supports limit */
        if (ha->num_vhosts > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa004,
                    "num_vhosts %u is bigger "
                    "than max_npiv_vports %u.\n",
                    ha->num_vhosts, ha->max_npiv_vports);
                return VPCERR_UNSUPPORTED;
        }
        return 0;
}
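/*
 * qla24xx_create_vhost
 *	Allocates a new scsi_qla_host for the vport, assigns it a vp_id,
 *	initializes its SCSI host parameters, and starts its timer.
 */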
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        struct scsi_host_template *sht = &qla2xxx_driver_template;
        struct Scsi_Host *host;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
                ql_log(ql_log_warn, vha, 0xa005,
                    "scsi_host_alloc() failed for vport.\n");
                return NULL;
        }

        host = vha->host;
        fc_vport->dd_data = vha;
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa006,
                    "Couldn't allocate vp_id.\n");
                goto create_vhost_failed;
        }
        vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

        /*
         * To fix the issue of processing a parent's RSCN for the vport before
         * its SCR is complete.
         */
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

        vha->req = base_vha->req;
        host->can_queue = base_vha->req->length + 128;
        host->cmd_per_lun = 3;
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
        host->max_channel = MAX_BUSES - 1;
        host->max_lun = ql2xmaxlun;
        host->unique_id = host->host_no;
        host->max_id = ha->max_fibre_devices;
        host->transportt = qla2xxx_transport_vport_template;

        ql_dbg(ql_dbg_vport, vha, 0xa007,
            "Detect vport hba %ld at address = %p.\n",
            vha->host_no, vha);

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);
        set_bit(vha->vp_idx, ha->vp_idx_map);
        ha->cur_vport_count++;
        mutex_unlock(&ha->vport_lock);

        return vha;

create_vhost_failed:
        return NULL;
}
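/*
 * qla25xx_free_req_que / qla25xx_free_rsp_que
 *	Release a queue's DMA ring and queue-map slot; the response-queue
 *	variant also frees its MSI-X vector if one was assigned.
 */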
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = req->id;

        dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
                sizeof(request_t), req->ring, req->dma);
        req->ring = NULL;
        req->dma = 0;
        if (que_id) {
                ha->req_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(req->outstanding_cmds);
        kfree(req);
}
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = rsp->id;

        if (rsp->msix && rsp->msix->have_irq) {
                free_irq(rsp->msix->vector, rsp);
                rsp->msix->have_irq = 0;
                rsp->msix->rsp = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
                sizeof(response_t), rsp->ring, rsp->dma);
        rsp->ring = NULL;
        rsp->dma = 0;
        if (que_id) {
                ha->rsp_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
}
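/*
 * qla25xx_delete_req_que / qla25xx_delete_rsp_que
 *	Re-issue the queue init command with BIT_0 set in the queue options
 *	to request deletion, then free the queue on success.
 */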
static int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        int ret = -1;

        if (req) {
                req->options |= BIT_0;
                ret = qla25xx_init_req_que(vha, req);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_req_que(vha, req);

        return ret;
}
static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        int ret = -1;

        if (rsp) {
                rsp->options |= BIT_0;
                ret = qla25xx_init_rsp_que(vha, rsp);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_rsp_que(vha, rsp);

        return ret;
}
/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;

        /* Delete request queues */
        for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                req = ha->req_q_map[cnt];
                if (req && test_bit(cnt, ha->req_qid_map)) {
                        ret = qla25xx_delete_req_que(vha, req);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00ea,
                                    "Couldn't delete req que %d.\n",
                                    req->id);
                                return ret;
                        }
                }
        }

        /* Delete response queues */
        for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                rsp = ha->rsp_q_map[cnt];
                if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
                        ret = qla25xx_delete_rsp_que(vha, rsp);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00eb,
                                    "Couldn't delete rsp que %d.\n",
                                    rsp->id);
                                return ret;
                        }
                }
        }
        return ret;
}
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t __iomem *reg;
        uint32_t cnt;

        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00d9,
                    "Failed to allocate memory for request queue.\n");
                goto failed;
        }

        req->length = REQUEST_ENTRY_CNT_24XX;
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
                        (req->length + 1) * sizeof(request_t),
                        &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00da,
                    "Failed to allocate memory for request_ring.\n");
                goto que_failed;
        }

        ret = qla2x00_alloc_outstanding_cmds(ha, req);
        if (ret != QLA_SUCCESS)
                goto que_failed;

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->vport_lock);
                ql_log(ql_log_warn, base_vha, 0x00db,
                    "No resources to create additional request queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->req_qid_map);
        ha->req_q_map[que_id] = req;
        req->rid = rid;
        req->vp_idx = vp_idx;
        req->qos = qos;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        ql_dbg(ql_dbg_init, base_vha, 0x00dc,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);

        req->rsp = ha->rsp_q_map[rsp_que];
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
            "options=0x%x.\n", req->options);
        ql_dbg(ql_dbg_init, base_vha, 0x00dd,
            "options=0x%x.\n", req->options);
        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;

        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        req->req_q_in = &reg->isp25mq.req_q_in;
        req->req_q_out = &reg->isp25mq.req_q_out;
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        req->out_ptr = (void *)(req->ring + req->length);
        mutex_unlock(&ha->vport_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index,
            req->cnt, req->id, req->max_q_depth);
        ql_dbg(ql_dbg_init, base_vha, 0x00de,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index, req->cnt,
            req->id, req->max_q_depth);

        ret = qla25xx_init_req_que(base_vha, req);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00df,
                    "%s failed.\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }

        return req->id;

que_failed:
        qla25xx_free_req_que(base_vha, req);
failed:
        return 0;
}
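/*
 * qla_do_work
 *	Workqueue handler that drains a response queue outside of hard
 *	interrupt context, under the hardware lock.
 */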
static void qla_do_work(struct work_struct *work)
{
        unsigned long flags;
        struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha = rsp->hw;

        spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        uint8_t vp_idx, uint16_t rid, int req)
{
        int ret = 0;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t __iomem *reg;

        rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
        if (rsp == NULL) {
                ql_log(ql_log_warn, base_vha, 0x0066,
                    "Failed to allocate memory for response queue.\n");
                goto failed;
        }

        rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
                        (rsp->length + 1) * sizeof(response_t),
                        &rsp->dma, GFP_KERNEL);
        if (rsp->ring == NULL) {
                ql_log(ql_log_warn, base_vha, 0x00e1,
                    "Failed to allocate memory for response ring.\n");
                goto que_failed;
        }

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->vport_lock);
                ql_log(ql_log_warn, base_vha, 0x00e2,
                    "No resources to create additional response queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);

        if (ha->flags.msix_enabled)
                rsp->msix = &ha->msix_entries[que_id + 1];
        else
                ql_log(ql_log_warn, base_vha, 0x00e3,
                    "MSIX not enabled.\n");

        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->id = que_id;
        ql_dbg(ql_dbg_init, base_vha, 0x00e4,
            "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
            que_id, rsp->rid, rsp->vp_idx, rsp->hw);
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(rsp->rid))
                options |= BIT_5;
        /* Enable MSIX handshake mode on for uncapable adapters */
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;

        rsp->options = options;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        rsp->in_ptr = (void *)(rsp->ring + rsp->length);
        mutex_unlock(&ha->vport_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
        ql_dbg(ql_dbg_init, base_vha, 0x00e5,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);

        ret = qla25xx_request_irq(rsp);
        if (ret)
                goto que_failed;

        ret = qla25xx_init_rsp_que(base_vha, rsp);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00e7,
                    "%s failed.\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }
        rsp->req = ha->req_q_map[req];

        qla2x00_init_response_q_entries(rsp);
        if (rsp->hw->wq)
                INIT_WORK(&rsp->q_work, qla_do_work);
        return rsp->id;

que_failed:
        qla25xx_free_rsp_que(base_vha, rsp);
failed:
        return 0;
}