// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>
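
/*
 * qla2x00_vp_stop_timer
 *	Stop the per-vport timer; only acts on virtual ports (vp_idx != 0)
 *	whose timer is currently active.
 */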
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}
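
/*
 * qla24xx_allocate_vp_id
 *	Reserve a free vp_idx from ha->vp_idx_map under ha->vport_lock,
 *	link the vport onto ha->vp_list and publish it in the VP map.
 *	Returns the allocated id; a value above ha->max_npiv_vports means
 *	no id was available.
 */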
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}
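
/*
 * qla24xx_deallocate_vp_id
 *	Undo qla24xx_allocate_vp_id(): wait for outstanding references
 *	(vref_count) to drain, unlink the vport from ha->vp_list, clear its
 *	slot in the VP map and release the vp_idx bit.
 */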
static void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u32 i, bailout;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue).
	 */
	bailout = 0;
	for (i = 0; i < 500; i++) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		if (atomic_read(&vha->vref_count) == 0) {
			list_del(&vha->list);
			qla_update_vp_map(vha, RESET_VP_IDX);
			bailout = 1;
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (bailout)
			break;
		else
			msleep(20);
	}
	if (!bailout) {
		ql_log(ql_log_info, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_del(&vha->list);
		qla_update_vp_map(vha, RESET_VP_IDX);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	}

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}
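
/*
 * qla24xx_find_vhost_by_name
 *	Walk ha->vp_list under vport_slock and return the vport whose WWPN
 *	matches port_name, or NULL if none is found.
 */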
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}
/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual adapter block pointer.
 *
 * Return:
 *	None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}
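
/*
 * qla24xx_disable_vp
 *	Take a virtual port offline: log out all sessions, mark the loop
 *	down, remove the port id from the VP target map and move the
 *	fc_vport to DISABLED (or FAILED on error).
 */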
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	fc_port_t *fcport;
	int ret = QLA_SUCCESS;

	if (vha->hw->flags.edif_enabled) {
		if (DBELL_ACTIVE(vha))
			qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
			    FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);
		/* delete sessions and flush sa_indexes */
		qla2x00_wait_for_sess_deletion(vha);
	}

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 1;

	if (!vha->hw->flags.edif_enabled)
		qla2x00_wait_for_sess_deletion(vha);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qla_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}
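
/*
 * qla24xx_enable_vp
 *	Bring a virtual port online once the physical port is up and the
 *	topology is fabric (ISP_CFG_F); pushes the vport configuration to
 *	the firmware via qla24xx_modify_vp_config().
 */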
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}
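
/*
 * qla24xx_configure_vp
 *	Complete vport bring-up: request RSCN delivery (change request #3),
 *	configure the vhba and mark the fc_vport ACTIVE.
 */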
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
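
/*
 * qla2x00_alert_all_vps
 *	Fan an asynchronous event out to every virtual port on the adapter,
 *	taking a vref on each vport while its event handler runs outside
 *	vport_slock.
 */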
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha, *tvp;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
		if (vha->vp_idx) {
			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
				continue;

			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
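
/*
 * qla2x00_vp_abort_isp
 *	Vport-level ISP abort handling: the physical port performs the real
 *	recovery, so the vport is treated as a loop-down and then
 *	re-enabled.
 */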
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/*
	 * To exclusively reset vport, we need to log it out first.
	 * Note: This control_vp can fail if ISP reset is already
	 * issued, this is expected, as the vp would already be
	 * logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->logout_on_delete = 0;
	}

	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);

	return qla24xx_enable_vp(vha);
}
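
/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC work: complete deferred vport configuration, process
 *	queued PUREX IOCBs, schedule relogins and run loop resync as flagged
 *	in vha->dpc_flags.
 */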
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
		if (atomic_read(&vha->loop_state) == LOOP_READY) {
			qla24xx_process_purex_list(&vha->purex_list);
			clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
		}
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp, *tvp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
			wake_up(&vp->vref_waitq);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
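
/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate an fc_vport create request: FCP initiator role only, NPIV
 *	support in F/W, H/W and switch, a unique WWPN, and room below the
 *	max-npiv-vports limit.
 */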
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check that the WWPN is unique */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max-npiv-vports limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}

	return 0;
}
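
/*
 * qla24xx_create_vhost
 *	Allocate and initialize a new scsi_qla_host for an NPIV vport:
 *	create the SCSI host, assign a vp_idx, inherit queue and transport
 *	settings from the base port and start the vport timer.
 */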
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	const struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	vha->irq_offset = QLA_BASE_VECTORS;
	host = vha->host;
	fc_vport->dd_data = vha;

	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);

	/*
	 * To fix the issue of processing a parent's RSCN for the vport
	 * before its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
}
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}
static int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}
static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}
/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);

	req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (uint16_t *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}
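
/*
 * qla_do_work - deferred response-queue processing for a qpair, run from
 * the adapter workqueue with the qpair lock held.
 */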
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha = qpair->vha;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode on for uncapable adapters */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    ha->flags.disable_msix_handshake ?
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}
static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
	if (sp->comp)
		complete(sp->comp);
	/* don't free sp here. Let the caller do the free */
}
/**
 * qla24xx_control_vp() - Enable a virtual port for given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent for enable virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		return rval;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->comp = &comp;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla_ctrlvp_sp_done);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&comp);
	sp->comp = NULL;

	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}
done:
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	return rval;
}
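
/*
 * qla_find_host_by_vp_idx
 *	Resolve a vp_idx to its scsi_qla_host using ha->vp_map; returns vha
 *	itself when the index already matches it, NULL when the index is
 *	not allocated.
 */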
struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->vp_map[vp_idx].vha;

	return NULL;
}
/* vport_slock to be held by the caller */
void
qla_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	if (!vha->hw->vp_map)
		return;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_disc, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->host_map,
			    key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_disc, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->host_map, key, vha);
		break;
	case RESET_VP_IDX:
		vha->hw->vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_disc, vha, 0xf01a,
		    "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (slot)
			btree_remove32(&vha->hw->host_map, key);
		break;
	}
}
void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	if (!vha->d_id.b24) {
		vha->d_id = id;
		qla_update_vp_map(vha, SET_AL_PA);
	} else if (vha->d_id.b24 != id.b24) {
		qla_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qla_update_vp_map(vha, SET_AL_PA);
	}
}
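
/*
 * Per-qpair FCP CMND DMA buffer pool.  qla_create_buf_pool() sizes the
 * pool to the request queue length; qla_get_buf()/qla_put_buf() hand out
 * and return slots under the qpair lock, and qla_adjust_buf() periodically
 * trims buffers that have not been used for a while.
 */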
int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp)
{
	int sz;

	qp->buf_pool.num_bufs = qp->req->length;

	sz = BITS_TO_LONGS(qp->req->length);
	qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL);
	if (!qp->buf_pool.buf_map) {
		ql_log(ql_log_warn, vha, 0x0186,
		    "Failed to allocate buf_map(%zd).\n", sz * sizeof(unsigned long));
		return -ENOMEM;
	}
	sz = qp->req->length * sizeof(void *);
	qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *), GFP_KERNEL);
	if (!qp->buf_pool.buf_array) {
		ql_log(ql_log_warn, vha, 0x0186,
		    "Failed to allocate buf_array(%d).\n", sz);
		kfree(qp->buf_pool.buf_map);
		return -ENOMEM;
	}
	sz = qp->req->length * sizeof(dma_addr_t);
	qp->buf_pool.dma_array = kcalloc(qp->req->length, sizeof(dma_addr_t), GFP_KERNEL);
	if (!qp->buf_pool.dma_array) {
		ql_log(ql_log_warn, vha, 0x0186,
		    "Failed to allocate dma_array(%d).\n", sz);
		kfree(qp->buf_pool.buf_map);
		kfree(qp->buf_pool.buf_array);
		return -ENOMEM;
	}
	set_bit(0, qp->buf_pool.buf_map);
	return 0;
}
void qla_free_buf_pool(struct qla_qpair *qp)
{
	int i;
	struct qla_hw_data *ha = qp->vha->hw;

	for (i = 0; i < qp->buf_pool.num_bufs; i++) {
		if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i])
			dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[i],
			    qp->buf_pool.dma_array[i]);
		qp->buf_pool.buf_array[i] = NULL;
		qp->buf_pool.dma_array[i] = 0;
	}

	kfree(qp->buf_pool.dma_array);
	kfree(qp->buf_pool.buf_array);
	kfree(qp->buf_pool.buf_map);
}
/* qp->qp_lock is assumed to be held at this point */
int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc)
{
	u16 tag, i = 0;
	void *buf;
	dma_addr_t buf_dma;
	struct qla_hw_data *ha = vha->hw;

	dsc->tag = TAG_FREED;
again:
	tag = find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs);
	if (tag >= qp->buf_pool.num_bufs) {
		ql_dbg(ql_dbg_io, vha, 0x00e2,
		    "qp(%d) ran out of buf resource.\n", qp->id);
		return -EIO;
	}
	if (tag == 0) {
		set_bit(0, qp->buf_pool.buf_map);
		i++;
		if (i == 5) {
			ql_dbg(ql_dbg_io, vha, 0x00e3,
			    "qp(%d) unable to get tag.\n", qp->id);
			return -EIO;
		}
		goto again;
	}

	if (!qp->buf_pool.buf_array[tag]) {
		buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma);
		if (!buf) {
			ql_log(ql_log_fatal, vha, 0x13b1,
			    "Failed to allocate buf.\n");
			return -ENOMEM;
		}

		dsc->buf = qp->buf_pool.buf_array[tag] = buf;
		dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma;
		qp->buf_pool.num_alloc++;
	} else {
		dsc->buf = qp->buf_pool.buf_array[tag];
		dsc->buf_dma = qp->buf_pool.dma_array[tag];
		memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE);
	}

	qp->buf_pool.num_active++;
	if (qp->buf_pool.num_active > qp->buf_pool.max_used)
		qp->buf_pool.max_used = qp->buf_pool.num_active;

	dsc->tag = tag;
	set_bit(tag, qp->buf_pool.buf_map);
	return 0;
}
static void qla_trim_buf(struct qla_qpair *qp, u16 trim)
{
	int i, j;
	struct qla_hw_data *ha = qp->vha->hw;

	if (!trim)
		return;

	for (i = 0; i < trim; i++) {
		j = qp->buf_pool.num_alloc - 1;
		if (test_bit(j, qp->buf_pool.buf_map)) {
			ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b,
			    "QP id(%d): trim active buf[%d]. Remain %d bufs\n",
			    qp->id, j, qp->buf_pool.num_alloc);
			return;
		}

		if (qp->buf_pool.buf_array[j]) {
			dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[j],
			    qp->buf_pool.dma_array[j]);
			qp->buf_pool.buf_array[j] = NULL;
			qp->buf_pool.dma_array[j] = 0;
		}
		qp->buf_pool.num_alloc--;
		if (!qp->buf_pool.num_alloc)
			break;
	}
	ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010,
	    "QP id(%d): trimmed %d bufs. Remain %d bufs\n",
	    qp->id, trim, qp->buf_pool.num_alloc);
}
static void __qla_adjust_buf(struct qla_qpair *qp)
{
	u32 trim;

	qp->buf_pool.take_snapshot = 0;
	qp->buf_pool.prev_max = qp->buf_pool.max_used;
	qp->buf_pool.max_used = qp->buf_pool.num_active;

	if (qp->buf_pool.prev_max > qp->buf_pool.max_used &&
	    qp->buf_pool.num_alloc > qp->buf_pool.max_used) {
		trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used;
		trim = (trim * 10) / 100;
		trim = trim ? trim : 1;
		qla_trim_buf(qp, trim);
	} else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) {
		/* 2 periods of no io */
		qla_trim_buf(qp, qp->buf_pool.num_alloc);
	}
}
/* qp->qp_lock is assumed to be held at this point */
void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc)
{
	if (dsc->tag == TAG_FREED)
		return;

	lockdep_assert_held(qp->qp_lock_ptr);

	clear_bit(dsc->tag, qp->buf_pool.buf_map);
	qp->buf_pool.num_active--;
	dsc->tag = TAG_FREED;

	if (qp->buf_pool.take_snapshot)
		__qla_adjust_buf(qp);
}
#define EXPIRE	(60 * HZ)
void qla_adjust_buf(struct scsi_qla_host *vha)
{
	unsigned long flags;
	int i;
	struct qla_qpair *qp;

	if (!vha->buf_expired) {
		vha->buf_expired = jiffies + EXPIRE;
		return;
	}

	if (time_before(jiffies, vha->buf_expired))
		return;

	vha->buf_expired = jiffies + EXPIRE;

	for (i = 0; i < vha->hw->num_qpairs; i++) {
		qp = vha->hw->queue_pair_map[i];
		if (!qp)
			continue;
		if (!qp->buf_pool.num_alloc)
			continue;

		if (qp->buf_pool.take_snapshot) {
			/* no io has gone through in the last EXPIRE period */
			spin_lock_irqsave(qp->qp_lock_ptr, flags);
			__qla_adjust_buf(qp);
			spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
		} else {
			qp->buf_pool.take_snapshot = 1;
		}
	}
}