/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_target.h"

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
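
/*
 * Example: dsds = 12 needs the Command Type 3 IOCB (first DSD), two full
 * Continuation Type 1 IOCBs ((12 - 1) / 5 = 2, five DSDs each) and one
 * more continuation for the (12 - 1) % 5 = 1 leftover descriptor, i.e.
 * 4 IOCB entries in total.
 */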

/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      port = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = rd_reg_word(addr);
		barrier();
		cpu_relax();
		second = rd_reg_word(addr);
	} while (first != second);

	return (first);
}
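
/*
 * The register is read until two consecutive samples agree, so a value
 * caught mid-update by the RISC is never returned; barrier() and
 * cpu_relax() keep the compiler from merging the two reads.
 */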

static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;

	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
}

static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for ( ; iter--; isrc++)
		*odest++ = cpu_to_le32(*isrc);
}
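
/*
 * Both helpers above copy the buffer one 32-bit word at a time
 * (iter = bsize >> 2), so the byte size is assumed to be a multiple of
 * 4; any trailing 1-3 bytes are silently ignored.
 */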

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
	int old_val;
	uint8_t shiftbits, mask;
	uint8_t port_dstate_str_sz;

	/* This will have to change when the max no. of states > 16 */
	shiftbits = 4;
	mask = (1 << shiftbits) - 1;

	port_dstate_str_sz = sizeof(port_dstate_str) / sizeof(char *);
	fcport->disc_state = state;
	while (1) {
		old_val = atomic_read(&fcport->shadow_disc_state);
		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
		    old_val, (old_val << shiftbits) | state)) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
			    fcport->port_name, (old_val & mask) < port_dstate_str_sz ?
				port_dstate_str[old_val & mask] : "Unknown",
			    port_dstate_str[state], fcport->d_id.b24);
			return;
		}
	}
}
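
/*
 * shadow_disc_state keeps a packed history of disc_state transitions:
 * each state takes 4 bits (hence shiftbits = 4), so the 32-bit atomic
 * remembers the last eight states.  The cmpxchg loop retries until the
 * new state is shifted in without racing another updater.
 */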

static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}
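
/*
 * Per the switch above: strip/insert operations enable HBA error
 * checking at ql2xenablehba_err_chk >= 1, pass-through operations only
 * at >= 2, and READ_INSERT/WRITE_STRIP (where the HBA itself produces
 * or consumes all protection data) are always checked.
 */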

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline int
qla2x00_chip_is_down(scsi_qla_host_t *vha)
{
	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
}

static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
			    struct qla_qpair *qpair, fc_port_t *fcport)
{
	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = vha;
	sp->qpair = qpair;
	sp->cmd_type = TYPE_SRB;
	/* ref : INIT - normal flow */
	kref_init(&sp->cmd_kref);
	INIT_LIST_HEAD(&sp->elem);
}

static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
    fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (sp)
		qla2xxx_init_sp(sp, vha, qpair, fcport);
	else
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}

void qla2xxx_rel_done_warning(srb_t *sp, int res);
void qla2xxx_rel_free_warning(srb_t *sp);

static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	sp->done = qla2xxx_rel_done_warning;
	sp->free = qla2xxx_rel_free_warning;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}
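
/*
 * ->done and ->free are pointed at warning stubs before the SRB goes
 * back to the mempool, so a stale caller invoking either callback after
 * release is reported instead of silently running on freed state.
 */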

static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	struct qla_qpair *qpair;

	if (unlikely(qla_vha_mark_busy(vha)))
		return NULL;

	qpair = vha->hw->base_qpair;
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
	if (!sp)
		goto done;

	sp->vha = vha;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}
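
/*
 * Completion is signalled only when a mailbox command is actually
 * waiting (MBX_INTR_WAIT set) and mailbox interrupts are enabled, so a
 * stray MBX_INTERRUPT status cannot complete mbx_intr_comp while nobody
 * is sleeping on it.
 */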

static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
	u8 scope;
	u16 qual;
#define SQ_SCOPE_MASK		0xc000 /* SAM-6 rev5 5.3.2 */
#define SQ_SCOPE_SHIFT		14
#define SQ_QUAL_MASK		0x3fff

#define SQ_MAX_WAIT_SEC		60 /* Max I/O hold off time in seconds. */
#define SQ_MAX_WAIT_TIME	(SQ_MAX_WAIT_SEC * 10) /* in 100ms. */

	if (!sts_qual) /* Common case. */
		return;

	scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
	/* Handle only scope 1 or 2, which is for I-T nexus. */
	if (scope != 1 && scope != 2)
		return;

	/* Skip processing, if retry delay timer is already in effect. */
	if (fcport->retry_delay_timestamp &&
	    time_before(jiffies, fcport->retry_delay_timestamp))
		return;

	qual = sts_qual & SQ_QUAL_MASK;
	if (qual < 1 || qual > 0x3fef)
		return;
	qual = min(qual, (u16)SQ_MAX_WAIT_TIME);

	/* qual is expressed in 100ms increments. */
	fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);

	ql_log(ql_log_warn, fcport->vha, 0x5101,
	    "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
	    fcport->port_name, sts_qual, qual * 100);
}
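
/*
 * Example: sts_qual = 0x4096 gives scope = 1 (I-T nexus) and
 * qual = 0x96 = 150 units of 100 ms, so I/O is held off for 15 seconds
 * (capped at SQ_MAX_WAIT_TIME, i.e. 60 seconds).
 */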

static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
	if (qla_ini_mode_enabled(vha) &&
	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_tgt_mode_enabled(vha) &&
	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_dual_mode_enabled(vha) &&
	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
		return true;
	else
		return false;
}

static inline void
qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
{
	qpair->cpuid = cpuid;

	if (!list_empty(&qpair->hints_list)) {
		struct qla_qpair_hint *h;

		list_for_each_entry(h, &qpair->hints_list, hint_elem)
			h->cpuid = qpair->cpuid;
	}
}

static inline struct qla_qpair_hint *
qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
{
	struct qla_qpair_hint *h;
	u16 i;

	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
		h = &tgt->qphints[i];
		if (h->qpair == qpair)
			return h;
	}

	return NULL;
}

static inline void
qla_83xx_start_iocbs(struct qla_qpair *qpair)
{
	struct req_que *req = qpair->req;

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	wrt_reg_dword(req->req_q_in, req->ring_index);
}
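
/*
 * The request queue is a circular ring: ring_ptr wraps back to ring[0]
 * once ring_index reaches the ring length, and writing ring_index to
 * the req_q_in doorbell tells the firmware that new IOCBs are ready.
 */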

static inline int
qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
{
	uint32_t data;

	data =
	    ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];

	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}

enum {
	RESOURCE_NONE,
	RESOURCE_IOCB = BIT_0,
	RESOURCE_EXCH = BIT_1,  /* exchange */
	RESOURCE_FORCE = BIT_2,
	RESOURCE_HA = BIT_3,
};

static inline int
qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
	u16 iocbs_used, i;
	u16 exch_used;
	struct qla_hw_data *ha = qp->hw;

	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;
		return 0;
	}
	if (iores->res_type & RESOURCE_FORCE)
		goto force;

	if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) {
		/* no need to acquire qpair lock. It's just rough calculation */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}

	if (iores->res_type & RESOURCE_EXCH) {
		exch_used = ha->base_qpair->fwres.exch_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
		}

		if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}

	if (ql2xenforce_iocb_limit == 2) {
		if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
		    ha->fwres.iocb_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}

		if (iores->res_type & RESOURCE_EXCH) {
			if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
			    ha->fwres.exch_limit) {
				iores->res_type = RESOURCE_NONE;
				return -ENOSPC;
			}
		}
	}

force:
	qp->fwres.iocbs_used += iores->iocb_cnt;
	qp->fwres.exch_used += iores->exch_cnt;
	if (ql2xenforce_iocb_limit == 2) {
		atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
		atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
		iores->res_type |= RESOURCE_HA;
	}
	return 0;
}
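
/*
 * Two enforcement levels: ql2xenforce_iocb_limit == 1 does rough,
 * lockless per-qpair accounting summed over all queue pairs, while
 * level 2 additionally tracks adapter-wide usage in the ha->fwres
 * atomics and tags the reservation with RESOURCE_HA so that
 * qla_put_fw_resources() knows to undo the atomic counts as well.
 */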

/*
 * Decrement to zero.  This routine will not decrement below zero.
 * @v: pointer of type atomic_t
 * @amount: amount to decrement from v
 */
static void qla_atomic_dtz(atomic_t *v, int amount)
{
	int c, old, dec;

	c = atomic_read(v);
	for (;;) {
		dec = c - amount;
		if (unlikely(dec < 0))
			dec = 0;

		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
}
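
/*
 * Classic lock-free read-modify-write: the clamped value is recomputed
 * and the cmpxchg retried until no other CPU has modified *v in the
 * meantime.
 */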

static inline void
qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
	struct qla_hw_data *ha = qp->hw;

	if (iores->res_type & RESOURCE_HA) {
		if (iores->res_type & RESOURCE_IOCB)
			qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);

		if (iores->res_type & RESOURCE_EXCH)
			qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
	}

	if (iores->res_type & RESOURCE_IOCB) {
		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
			qp->fwres.iocbs_used -= iores->iocb_cnt;
		} else {
			/* should not happen */
			qp->fwres.iocbs_used = 0;
		}
	}

	if (iores->res_type & RESOURCE_EXCH) {
		if (qp->fwres.exch_used >= iores->exch_cnt) {
			qp->fwres.exch_used -= iores->exch_cnt;
		} else {
			/* should not happen */
			qp->fwres.exch_used = 0;
		}
	}
	iores->res_type = RESOURCE_NONE;
}

#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
 * qla2x00_isp_reg_stat
 *
 * Description:
 *	Read the host status register of ISP before aborting the command.
 *
 * Input:
 *	ha = pointer to host adapter structure.
 *
 * Returns:
 *	Either true or false.
 *
 * Note: Return true if there is register disconnect.
 **************************************************************************/
static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;

	if (IS_P3P_TYPE(ha))
		return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
	else
		return ((rd_reg_dword(&reg->host_status)) ==
		    ISP_REG_DISCONNECT);
}

static inline
bool qla_pci_disconnected(struct scsi_qla_host *vha,
			  struct device_reg_24xx __iomem *reg)
{
	uint32_t stat;
	bool ret = false;

	stat = rd_reg_dword(&reg->host_status);
	if (stat == 0xffffffff) {
		ql_log(ql_log_info, vha, 0x8041,
		    "detected PCI disconnect.\n");
		qla_schedule_eeh_work(vha);
		ret = true;
	}
	return ret;
}
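
/*
 * A PCI MMIO read of all ones is the canonical signature of a
 * disconnected or surprise-removed device, so EEH recovery is scheduled
 * rather than trusting the register contents.
 */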

static inline bool
fcport_is_smaller(fc_port_t *fcport)
{
	if (wwn_to_u64(fcport->port_name) <
	    wwn_to_u64(fcport->vha->port_name))
		return true;
	else
		return false;
}

static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	return !fcport_is_smaller(fcport);
}

static inline struct qla_qpair *
qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
{
	int cpuid = raw_smp_processor_id();

	if (qpair->cpuid != cpuid &&
	    ha->qp_cpu_map[cpuid]) {
		qpair = ha->qp_cpu_map[cpuid];
	}
	return qpair;
}

static inline void
qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha,
			 struct qla_msix_entry *msix,
			 struct qla_qpair *qpair)
{
	const struct cpumask *mask;
	unsigned int cpu;

	if (!ha->qp_cpu_map)
		return;
	mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0);
	if (!mask)
		return;
	qpair->cpuid = cpumask_first(mask);
	for_each_cpu(cpu, mask) {
		ha->qp_cpu_map[cpu] = qpair;
	}
	msix->cpuid = qpair->cpuid;
	qpair->cpu_mapped = true;
}
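
/*
 * Every CPU in the MSI-X vector's affinity mask is pointed at this
 * qpair, letting qla_mapq_nvme_select_qpair() steer I/O to the queue
 * whose interrupt is handled on the submitting CPU.
 */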

static inline void
qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha)
{
	if (ha->qp_cpu_map) {
		kfree(ha->qp_cpu_map);
		ha->qp_cpu_map = NULL;
	}
}

static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	if (!ha->qp_cpu_map) {
		ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *),
		    GFP_KERNEL);
		if (!ha->qp_cpu_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for qp_cpu_map ptrs.\n");
			return -1;
		}
	}
	return 0;
}

static inline bool val_is_in_range(u32 val, u32 start, u32 end)
{
	if (val >= start && val <= end)
		return true;
	else
		return false;
}