/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}
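/*
 * vp_id allocation: ids are handed out from the ha->vp_idx_map bitmap
 * under ha->vport_lock. Bit 0 belongs to the physical (base) port,
 * which is why the search below spans max_npiv_vports + 1 bits and a
 * valid vport id is always non-zero.
 */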
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}
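/*
 * Teardown counterpart of qla24xx_allocate_vp_id(). The vref_count
 * drain below pairs with the atomic_inc()/atomic_dec() + wake_up()
 * performed by vp_list walkers such as qla2x00_alert_all_vps() and
 * qla2x00_do_dpc_all_vps(): the vport is only unlinked once no walker
 * still holds a reference (or the 10 x 1s wait times out).
 */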
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u8 i;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue).
	 */
	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->vref_waitq,
		    !atomic_read(&vha->vref_count), HZ) > 0)
			break;
	}

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}
/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	qla2x00_mark_all_devices_lost(vha);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
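/*
 * Fan async-event mailbox data out to all vports. The walker takes a
 * vref on the current vport and drops ha->vport_slock before invoking
 * qla2x00_async_event(), presumably to avoid holding the list lock
 * across the handler; the reference keeps the vport from being deleted
 * until the walker reacquires the lock, drops the vref and wakes any
 * waiting deleter.
 */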
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
				continue;

			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/*
	 * To exclusively reset vport, we need to log it out first.
	 * Note: This control_vp can fail if ISP reset is already
	 * issued, this is expected, as the vp would be already
	 * logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->logout_on_delete = 0;
	}

	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);

	return qla24xx_enable_vp(vha);
}
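/*
 * Per-vport DPC handler: a scaled-down mirror of the base port's DPC
 * loop in qla_os.c. Each block below tests one dpc_flags work bit,
 * does the work, then clears the bit.
 */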
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
		if (atomic_read(&vha->loop_state) == LOOP_READY) {
			qla24xx_process_purex_list(&vha->purex_list);
			clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
			wake_up(&vp->vref_waitq);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against max-npiv-supports */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}

	return 0;
}
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;
	ha->dpc_active = 0;
	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);

	/*
	 * Set VP_SCR_NEEDED so that a parent's RSCN is not processed for
	 * the vport before its own SCR has completed.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
}
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}
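/*
 * Note on the two delete helpers below: re-issuing the queue-init
 * mailbox command with BIT_0 set in the queue options is treated as
 * the firmware-side "delete this queue" request; the host-side memory
 * is freed only after the firmware has acknowledged the teardown.
 */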
static int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}
static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}
/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}
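/*
 * Bottom-half for qpair response processing: hooked up via INIT_WORK()
 * in qla25xx_create_rsp_que() and run from the driver workqueue, it
 * drains the qpair's response ring while holding qp_lock.
 */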
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = qpair->hw;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters without MSIX NACK support */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    ha->flags.disable_msix_handshake ?
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}
static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
	if (sp->comp)
		complete(sp->comp);
	/* don't free sp here. Let the caller do the free */
}
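/*
 * qla24xx_control_vp() below makes the async SRB machinery behave
 * synchronously: it points sp->comp at an on-stack completion, submits
 * the SRB, sleeps in wait_for_completion() until qla_ctrlvp_sp_done()
 * fires (or the IOCB times out), and then frees the SRB itself.
 */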
/**
 * qla24xx_control_vp() - Enable a virtual port for a given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent to enable the virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		return rval;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->comp = &comp;
	sp->done = qla_ctrlvp_sp_done;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&comp);
	sp->comp = NULL;

	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}
done:
	sp->free(sp);
	return rval;
}