/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */

#include "qla_target.h"

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

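/*
 * Worked example (added for illustration): the formula assumes the command
 * IOCB carries the first data segment and each Continuation Type 1 IOCB
 * carries up to five more, so dsds = 7 yields 1 + (6 / 5) + 1 = 3 IOCB
 * entries.
 */
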
/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      port = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = rd_reg_word(addr);
		barrier();
		cpu_relax();
		second = rd_reg_word(addr);
	} while (first != second);

	return (first);
}

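/*
 * Note (added for clarity): the register is read repeatedly, with a
 * compiler barrier and cpu_relax() between reads, until two consecutive
 * reads agree; this filters out a value sampled while the hardware is
 * still updating it.
 */
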
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;

	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
}

static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

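/*
 * Note (added for clarity): ifcp and ofcp alias the same buffer, so the
 * swab32() loop byte-swaps the FCP payload in place, one 32-bit word at a
 * time; any trailing bytes beyond a multiple of four are left untouched.
 */
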
static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for ( ; iter--; isrc++)
		*odest++ = cpu_to_le32(*isrc);
}

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
	int old_val;
	uint8_t shiftbits, mask;
	uint8_t port_dstate_str_sz;

	/* This will have to change when the max no. of states > 16 */
	shiftbits = 4;
	mask = (1 << shiftbits) - 1;

	port_dstate_str_sz = sizeof(port_dstate_str) / sizeof(char *);
	fcport->disc_state = state;
	while (1) {
		old_val = atomic_read(&fcport->shadow_disc_state);
		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
		    old_val, (old_val << shiftbits) | state)) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
			    fcport->port_name, (old_val & mask) < port_dstate_str_sz ?
				port_dstate_str[old_val & mask] : "Unknown",
			    port_dstate_str[state], fcport->d_id.b24);
			return;
		}
	}
}

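/*
 * Example (added for illustration): shadow_disc_state packs a rolling
 * history of 4-bit states, newest in the low nibble. A transition from
 * state 2 to state 5 turns a shadow value of 0x12 into
 * (0x12 << 4) | 5 = 0x125; the cmpxchg loop retries if another CPU
 * updated the history concurrently.
 */
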
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

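/*
 * Summary (added for clarity): with ql2xenablehba_err_chk >= 1 the HBA
 * verifies protection data for READ_STRIP/WRITE_INSERT operations, with
 * >= 2 also for READ_PASS/WRITE_PASS; READ_INSERT/WRITE_STRIP always
 * enable HBA checking.
 */
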
static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline int
qla2x00_chip_is_down(scsi_qla_host_t *vha)
{
	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
}

static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
			    struct qla_qpair *qpair, fc_port_t *fcport)
{
	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = vha;
	sp->qpair = qpair;
	sp->cmd_type = TYPE_SRB;

	/* ref : INIT - normal flow */
	kref_init(&sp->cmd_kref);
	INIT_LIST_HEAD(&sp->elem);
}

static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
    fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (sp)
		qla2xxx_init_sp(sp, vha, qpair, fcport);
	else
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}

void qla2xxx_rel_done_warning(srb_t *sp, int res);
void qla2xxx_rel_free_warning(srb_t *sp);

static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	sp->done = qla2xxx_rel_done_warning;
	sp->free = qla2xxx_rel_free_warning;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}

static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	struct qla_qpair *qpair;

	if (unlikely(qla_vha_mark_busy(vha)))
		return NULL;

	qpair = vha->hw->base_qpair;
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
	if (!sp)
		goto done;

	sp->vha = vha;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}

static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
	u8 scope;
	u16 qual;
#define SQ_SCOPE_MASK		0xc000 /* SAM-6 rev5 5.3.2 */
#define SQ_SCOPE_SHIFT		14
#define SQ_QUAL_MASK		0x3fff

#define SQ_MAX_WAIT_SEC		60 /* Max I/O hold off time in seconds. */
#define SQ_MAX_WAIT_TIME	(SQ_MAX_WAIT_SEC * 10) /* in 100ms. */

	if (!sts_qual) /* Common case. */
		return;

	scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
	/* Handle only scope 1 or 2, which is for I-T nexus. */
	if (scope != 1 && scope != 2)
		return;

	/* Skip processing, if retry delay timer is already in effect. */
	if (fcport->retry_delay_timestamp &&
	    time_before(jiffies, fcport->retry_delay_timestamp))
		return;

	qual = sts_qual & SQ_QUAL_MASK;
	if (qual < 1 || qual > 0x3fef)
		return;
	qual = min(qual, (u16)SQ_MAX_WAIT_TIME);

	/* qual is expressed in 100ms increments. */
	fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);

	ql_log(ql_log_warn, fcport->vha, 0x5101,
	    "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
	    fcport->port_name, sts_qual, qual * 100);
}

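/*
 * Worked example (added for illustration): sts_qual = 0x4005 decodes to
 * scope = 1 (I-T nexus) and qual = 5, so I/Os to the port are held off
 * for 5 * 100ms = 500ms; qual is capped at SQ_MAX_WAIT_TIME (60s).
 */
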
static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
	if (qla_ini_mode_enabled(vha) &&
	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_tgt_mode_enabled(vha) &&
	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_dual_mode_enabled(vha) &&
	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
		return true;
	else
		return false;
}

static inline void
qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
{
	qpair->cpuid = cpuid;

	if (!list_empty(&qpair->hints_list)) {
		struct qla_qpair_hint *h;

		list_for_each_entry(h, &qpair->hints_list, hint_elem)
			h->cpuid = qpair->cpuid;
	}
}

static inline struct qla_qpair_hint *
qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
{
	struct qla_qpair_hint *h;
	u16 i;

	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
		h = &tgt->qphints[i];
		if (h->qpair == qpair)
			return h;
	}

	return NULL;
}

static inline void
qla_83xx_start_iocbs(struct qla_qpair *qpair)
{
	struct req_que *req = qpair->req;

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	wrt_reg_dword(req->req_q_in, req->ring_index);
}

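/*
 * Note (added for clarity): the request queue is a circular ring; the
 * index and ring pointer wrap back to the start when they reach
 * req->length, and writing the new index to req_q_in publishes the
 * producer position to the adapter.
 */
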
static inline int
qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
{
	uint32_t data;

	data =
	    ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];

	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}

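/*
 * Note (added for clarity): bit 6 of the dual FCP/NVMe NVRAM flag byte
 * selects the FC4 priority: set means FCP, clear means NVMe.
 */
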
enum {
	RESOURCE_NONE,
	RESOURCE_IOCB = BIT_0,
	RESOURCE_EXCH = BIT_1,	/* exchange */
	RESOURCE_FORCE = BIT_2,
	RESOURCE_HA = BIT_3,
};

static inline int
qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
	u16 iocbs_used, i;
	u16 exch_used;
	struct qla_hw_data *ha = qp->hw;

	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;
		return 0;
	}
	if (iores->res_type & RESOURCE_FORCE)
		goto force;

	if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) {
		/* no need to acquire qpair lock. It's just rough calculation */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}

	if (iores->res_type & RESOURCE_EXCH) {
		exch_used = ha->base_qpair->fwres.exch_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
		}

		if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}

	if (ql2xenforce_iocb_limit == 2) {
		if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
		    ha->fwres.iocb_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}

		if (iores->res_type & RESOURCE_EXCH) {
			if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
			    ha->fwres.exch_limit) {
				iores->res_type = RESOURCE_NONE;
				return -ENOSPC;
			}
		}
	}

force:
	qp->fwres.iocbs_used += iores->iocb_cnt;
	qp->fwres.exch_used += iores->exch_cnt;

	if (ql2xenforce_iocb_limit == 2) {
		atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
		atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
		iores->res_type |= RESOURCE_HA;
	}
	return 0;
}

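/*
 * Usage sketch (illustrative only, not a call site in this file): a
 * submission path would size the request, try to reserve firmware
 * resources, and release them if the IOCB never reaches the hardware:
 *
 *	iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
 *	iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
 *	iores.exch_cnt = 1;
 *	if (qla_get_fw_resources(qpair, &iores))
 *		return -EBUSY;
 *
 * and, once the request is completed or abandoned:
 *
 *	qla_put_fw_resources(qpair, &iores);
 */
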
/*
 * qla_atomic_dtz - atomically decrement @v by @amount, but not below zero
 * @v: pointer of type atomic_t
 * @amount: amount to decrement from v
 */
static void qla_atomic_dtz(atomic_t *v, int amount)
{
	int c, old, dec;

	c = atomic_read(v);
	for (;;) {
		dec = c - amount;
		if (unlikely(dec < 0))
			dec = 0;

		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
}

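/*
 * Note (added for clarity): this is a standard compare-and-swap loop; if
 * another CPU changed the counter between the read and the cmpxchg, the
 * cmpxchg returns the fresh value and the decrement is recomputed, with
 * the result floored at zero.
 */
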
static inline void
qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
	struct qla_hw_data *ha = qp->hw;

	if (iores->res_type & RESOURCE_HA) {
		if (iores->res_type & RESOURCE_IOCB)
			qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);

		if (iores->res_type & RESOURCE_EXCH)
			qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
	}

	if (iores->res_type & RESOURCE_IOCB) {
		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
			qp->fwres.iocbs_used -= iores->iocb_cnt;
		} else {
			/* should not happen */
			qp->fwres.iocbs_used = 0;
		}
	}

	if (iores->res_type & RESOURCE_EXCH) {
		if (qp->fwres.exch_used >= iores->exch_cnt) {
			qp->fwres.exch_used -= iores->exch_cnt;
		} else {
			/* should not happen */
			qp->fwres.exch_used = 0;
		}
	}
	iores->res_type = RESOURCE_NONE;
}

#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
 * qla2x00_isp_reg_stat
 *
 * Description:
 *	Read the host status register of ISP before aborting the command.
 *
 * Input:
 *	ha = pointer to host adapter structure.
 *
 * Returns:
 *	Either true or false.
 *
 * Note: Return true if there is register disconnect.
 **************************************************************************/
static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;

	if (IS_P3P_TYPE(ha))
		return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
	else
		return ((rd_reg_dword(&reg->host_status)) ==
		    ISP_REG_DISCONNECT);
}

static inline
bool qla_pci_disconnected(struct scsi_qla_host *vha,
			  struct device_reg_24xx __iomem *reg)
{
	uint32_t stat;
	bool ret = false;

	stat = rd_reg_dword(&reg->host_status);
	if (stat == 0xffffffff) {
		ql_log(ql_log_info, vha, 0x8041,
		    "detected PCI disconnect.\n");
		qla_schedule_eeh_work(vha);
		ret = true;
	}
	return ret;
}

static inline bool
fcport_is_smaller(fc_port_t *fcport)
{
	if (wwn_to_u64(fcport->port_name) <
	    wwn_to_u64(fcport->vha->port_name))
		return true;
	else
		return false;
}

static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	return !fcport_is_smaller(fcport);
}

static inline struct qla_qpair *
qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
{
	int cpuid = raw_smp_processor_id();

	if (qpair->cpuid != cpuid &&
	    ha->qp_cpu_map[cpuid]) {
		qpair = ha->qp_cpu_map[cpuid];
	}
	return qpair;
}

static inline void
qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha,
			 struct qla_msix_entry *msix,
			 struct qla_qpair *qpair)
{
	const struct cpumask *mask;
	unsigned int cpu;

	if (!ha->qp_cpu_map)
		return;
	mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0);
	if (!mask)
		return;
	qpair->cpuid = cpumask_first(mask);
	for_each_cpu(cpu, mask) {
		ha->qp_cpu_map[cpu] = qpair;
	}
	msix->cpuid = qpair->cpuid;
	qpair->cpu_mapped = true;
}

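/*
 * Note (added for clarity): every CPU in the MSI-X vector's IRQ affinity
 * mask is pointed at this qpair in qp_cpu_map, so a submitter running on
 * one of those CPUs (see qla_mapq_nvme_select_qpair() above) picks the
 * qpair whose interrupt is delivered locally.
 */
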
static inline void
qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha)
{
	if (ha->qp_cpu_map) {
		kfree(ha->qp_cpu_map);
		ha->qp_cpu_map = NULL;
	}
}

static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	if (!ha->qp_cpu_map) {
		ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *),
					 GFP_KERNEL);
		if (!ha->qp_cpu_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for qp_cpu_map ptrs.\n");
			return -1;
		}
	}
	return 0;
}

static inline bool val_is_in_range(u32 val, u32 start, u32 end)
{
	if (val >= start && val <= end)
		return true;
	else
		return false;
}