// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scp->scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else
		complete(&cmd->cevent);
}
/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq: Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else
				complete(&cmd->cevent);
		}
	}
}
/**
 * context_reset() - reset context via specified register
 * @hwq: Hardware queue owning the context to be reset.
 * @reset_reg: MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}
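
/*
 * A note on the reset poll in context_reset(): the udelay() doubles on each
 * pass, so the worst-case busy-wait before declaring -ETIMEDOUT is a
 * geometric sum (in microseconds, assuming the loop runs the full
 * MC_ROOM_RETRY_CNT passes):
 *
 *	2^0 + 2^1 + ... + 2^MC_ROOM_RETRY_CNT = 2^(MC_ROOM_RETRY_CNT + 1) - 1
 */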
/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq: Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq: Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}
/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
			    __func__, cmd, cmd->rcb.data_len,
			    cmd->rcb.data_ea, rc);
	return rc;
}
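
/*
 * The 'room' cache in send_cmd_ioarrin() trades strict accuracy for fewer
 * MMIO reads: cmd_room is only re-read once the locally cached credit count
 * is exhausted. The pattern in isolation (a sketch only; consume_credit()
 * is a hypothetical helper, not part of this driver):
 *
 *	static bool consume_credit(struct hwq *hwq)
 *	{
 *		if (--hwq->room >= 0)
 *			return true;	// fast path, no MMIO
 *
 *		hwq->room = readq_be(&hwq->host_map->cmd_room) - 1;
 *		return hwq->room >= 0;	// slow path, one MMIO read
 *	}
 */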
/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}
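
/*
 * SQ bookkeeping, in brief: hsq_curr walks a fixed array of IOARCBs and
 * wraps back to hsq_start after the last slot, while hsq_credits
 * (decremented atomically above, replenished by process_hrrq()) keeps the
 * producer from lapping the consumer. Assuming NUM_SQ_ENTRY slots, at most
 * NUM_SQ_ENTRY - 1 commands can be outstanding on the ring at once.
 */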
/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}
/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 * @afu: AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}
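
/*
 * For HWQ_MODE_TAG above, the unique tag already encodes the block layer's
 * hardware context. A sketch of the decomposition, relying on the upstream
 * definition of BLK_MQ_UNIQUE_TAG_BITS being 16:
 *
 *	u32 tag = blk_mq_unique_tag(scp->request);
 *	u16 hwq = tag >> 16;		// blk_mq_unique_tag_to_hwq(tag)
 *	u16 req = tag & 0xffff;		// blk_mq_unique_tag_to_tag(tag)
 */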
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg: Internal structure associated with the host.
 * @sdev: SCSI device destined for TMF.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	bool needs_deletion = false;
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
		needs_deletion = true;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	if (needs_deletion) {
		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	}
out:
	kfree(buf);
	return rc;
}
/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}
/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		atomic_inc(&afu->cmds_active);
		break;
	}

	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	atomic_dec(&afu->cmds_active);
out:
	return rc;
}
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}
/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}
/**
 * term_intr() - disables all AFU interrupts
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 * @index: Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		/* fall through */
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		/* fall through */
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		/* fall through */
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
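
/*
 * The switch in term_intr() tears down as a waterfall: entry at a given
 * undo level falls through every lower level, so callers pass the deepest
 * level that was successfully set up and everything beneath it is undone
 * in reverse order. For example, a hardware queue that failed after
 * mapping two IRQs would be unwound with:
 *
 *	term_intr(cfg, UNMAP_TWO, index);	// unmap IRQs 2 and 1, then free
 */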
/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 * @index: Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
	hwq->hrrq_online = false;
	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}
/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg: Internal structure associated with the host.
 * @wait: Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}
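
/*
 * Minor numbers come from a plain bitmap, so allocation is first-fit and
 * O(CXLFLASH_MAX_ADAPTERS). Callers pair this with cxlflash_put_minor()
 * (below) on the teardown path, along the lines of (sketch only):
 *
 *	minor = cxlflash_get_minor();
 *	if (minor < 0)
 *		goto err;	// all CXLFLASH_MAX_ADAPTERS minors in use
 */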
/**
 * cxlflash_put_minor() - releases the minor number
 * @minor: Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* Yield to running recovery threads before continuing with remove */
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
				     cfg->state != STATE_PROBING);
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		/* fall through */
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
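
/*
 * Polling budget: with valid arguments the loop above waits roughly
 * delay_us * nretry at most before reporting failure. A readback of
 * U64_MAX typically means the MMIO read itself failed (link or adapter
 * gone), so the remaining retries are halved to fail fast rather than
 * spend the whole budget polling a dead port.
 */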
/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn: The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}
/**
 * afu_link_reset() - resets the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}
/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu: AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		reg = readq_be(&hwq->host_map->ctx_ctrl);
		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
		reg |= SISL_MSI_SYNC_ERROR;
		writeq_be(reg, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}
/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}
/**
 * process_hrrq() - process the read-response queue
 * @hwq: Hardware queue associated with the RRQ to process.
 * @doneq: Queue of commands harvested from the RRQ.
 * @budget: Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}
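
/*
 * RRQ toggle protocol, in brief: the AFU posts each entry with a toggle bit
 * that flips every time the producer wraps the ring. An entry belongs to
 * the consumer only while its toggle bit matches the consumer's local copy,
 * which yields a single-producer/single-consumer handoff with no shared
 * producer index. A minimal consumer sketch under that assumption, where
 * consume() stands in for the harvesting done above:
 *
 *	while ((*curr & SISL_RESP_HANDLE_T_BIT) == toggle) {
 *		consume(*curr & ~SISL_RESP_HANDLE_T_BIT);
 *		if (curr < end)
 *			curr++;
 *		else {
 *			curr = start;				// wrap ...
 *			toggle ^= SISL_RESP_HANDLE_T_BIT;	// ... and flip
 *		}
 *	}
 */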
/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq: Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll: IRQ poll structure associated with queue to poll.
 * @budget: Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}
/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	/* Silently drop spurious interrupts when queue is not online */
	if (!hwq->hrrq_online) {
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}
/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 *	- Order matters here as this array is indexed by bit position.
 *
 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *	  as complex and complains due to a lack of parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg_unmasked;
	u64 reg;
	u64 bit;
	u8 port;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (unlikely(reg_unmasked == 0)) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
			WARN_ON_ONCE(1);
			continue;
		}

		info = &ainfo[bit];
		if (unlikely(info->status != 1ULL << bit)) {
			WARN_ON_ONCE(1);
			continue;
		}

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */
			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}
/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg: Internal structure associated with the host.
 * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
						cfg->dev_id->driver_data;
	const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
	const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 *
	 * Allow for WWPN not being found for all devices, setting
	 * the returned WWPN to zero when not found. Notify with a
	 * log error for cards that should have had WWPN keywords
	 * in the VPD - cards requiring WWPN will not have their
	 * ports programmed and operate in an undefined state.
	 */
	for (k = 0; k < cfg->num_fc_ports; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (i < 0) {
			if (wwpn_vpd_required)
				dev_err(dev, "%s: Port %d WWPN not found\n",
					__func__, k);
			wwpn[k] = 0ULL;
			continue;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
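
/*
 * For reference, the WWPN lookup above walks PCI VPD laid out as
 * keyword/length/value triples inside the read-only large resource. A card
 * with two programmed ports might carry entries like (values hypothetical):
 *
 *	"V5", length 16, "5005076069800100"
 *	"V6", length 16, "5005076069800180"
 *
 * Each value is WWPN_LEN (16) ASCII hex characters, which is why WWPN_LEN
 * doubles as the base argument to kstrtoul() above.
 */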
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg: Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	struct hwq *hwq;
	void *cookie;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into hwq */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		cookie = hwq->ctx_cookie;

		hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

		/* Program the Endian Control for the master context */
		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
	}
}
/**
 * init_global() - initialize AFU global registers
 * @cfg: Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	struct sisl_host_map __iomem *hmap;
	__be64 __iomem *fc_port_regs;
	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	int j;
	void *ctx;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	/* Set up RRQ and SQ in HWQ for master issued cmds */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		hmap = hwq->host_map;

		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
		hwq->hrrq_online = true;

		if (afu_is_sq_cmd_mode(afu)) {
			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
		}
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = 0;
	} else {
		writeq_be(PORT_MASK(cfg->num_fc_ports),
			  &afu->afu_map->global.regs.afu_port_sel);
		num_ports = cfg->num_fc_ports;
	}

	for (i = 0; i < num_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Set up the LISN effective address for each master */
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);
			ctx = hwq->ctx_cookie;

			for (j = 0; j < hwq->num_irqs; j++) {
				reg = cfg->ops->get_irq_objhndl(ctx, j);
				writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
			}

			reg = hwq->ctx_hndl;
			writeq_be(SISL_LISN_PASID(reg, reg),
				  &hwq->ctrl_map->lisn_pasid[0]);
			writeq_be(SISL_LISN_PASID(0UL, reg),
				  &hwq->ctrl_map->lisn_pasid[1]);
		}
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
			   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
			   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
			  &hwq->ctrl_map->ctx_cap);
	}

	/*
	 * Determine write-same unmap support for host by evaluating the unmap
	 * sector support bit of the context control register associated with
	 * the primary hardware queue. Note that while this status is reflected
	 * in a context register, the outcome can be assumed to be host-wide.
	 */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	reg = readq_be(&hwq->host_map->ctx_ctrl);
	if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
		cfg->ws_unmap = true;

	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}
/**
 * start_afu() - initializes and starts the AFU
 * @cfg: Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int rc = 0;
	int i;

	init_pcr(cfg);

	/* Initialize each HWQ */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		/* After an AFU reset, RRQ entries are stale, clear them */
		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));

		/* Initialize RRQ pointers */
		hwq->hrrq_start = &hwq->rrq_entry[0];
		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
		hwq->hrrq_curr = hwq->hrrq_start;
		hwq->toggle = 1;

		/* Initialize spin locks */
		spin_lock_init(&hwq->hrrq_slock);
		spin_lock_init(&hwq->hsq_slock);

		/* Initialize SQ */
		if (afu_is_sq_cmd_mode(afu)) {
			memset(&hwq->sq, 0, sizeof(hwq->sq));
			hwq->hsq_start = &hwq->sq[0];
			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
			hwq->hsq_curr = hwq->hsq_start;

			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
		}

		/* Initialize IRQ poll */
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
				      cxlflash_irqpoll);
	}

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg: Internal structure associated with the host.
 * @hwq: Hardware queue to initialize.
 *
 * Return: UNDO_NOOP on success, the undo level to unwind on failure
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct hwq *hwq)
{
	struct device *dev = &cfg->dev->dev;
	void *ctx = hwq->ctx_cookie;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;
	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
	int num_irqs = hwq->num_irqs;

	rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
				   "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
				   "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
	if (!is_primary_hwq)
		goto out;

	rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
				   "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}
2001 * init_mc() - create and register as the master context
2002 * @cfg: Internal structure associated with the host.
2003 * @index: HWQ Index of the master context.
2005 * Return: 0 on success, -errno on failure
2007 static int init_mc(struct cxlflash_cfg *cfg, u32 index)
2009 void *ctx;
2010 struct device *dev = &cfg->dev->dev;
2011 struct hwq *hwq = get_hwq(cfg->afu, index);
2012 int rc = 0;
2013 int num_irqs;
2014 enum undo_level level;
2016 hwq->afu = cfg->afu;
2017 hwq->index = index;
2018 INIT_LIST_HEAD(&hwq->pending_cmds);
2020 if (index == PRIMARY_HWQ) {
2021 ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
2022 num_irqs = 3;
2023 } else {
2024 ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
2025 num_irqs = 2;
2027 if (IS_ERR_OR_NULL(ctx)) {
2028 rc = -ENOMEM;
2029 goto err1;
2032 WARN_ON(hwq->ctx_cookie);
2033 hwq->ctx_cookie = ctx;
2034 hwq->num_irqs = num_irqs;
2036 /* Set it up as a master with the CXL */
2037 cfg->ops->set_master(ctx);
2039 /* Reset AFU when initializing primary context */
2040 if (index == PRIMARY_HWQ) {
2041 rc = cfg->ops->afu_reset(ctx);
2042 if (unlikely(rc)) {
2043 dev_err(dev, "%s: AFU reset failed rc=%d\n",
2044 __func__, rc);
2045 goto err1;
2049 level = init_intr(cfg, hwq);
2050 if (unlikely(level)) {
2051 dev_err(dev, "%s: interrupt init failed level=%d\n", __func__, level);
rc = -ENODEV; /* init_intr() reports an undo level, not an -errno */
2052 goto err2;
2055 /* Finally, activate the context by starting it */
2056 rc = cfg->ops->start_context(hwq->ctx_cookie);
2057 if (unlikely(rc)) {
2058 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2059 level = UNMAP_THREE;
2060 goto err2;
2063 out:
2064 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2065 return rc;
2066 err2:
2067 term_intr(cfg, level, index);
2068 if (index != PRIMARY_HWQ)
2069 cfg->ops->release_context(ctx);
2070 err1:
2071 hwq->ctx_cookie = NULL;
2072 goto out;
2076 * get_num_afu_ports() - determines and configures the number of AFU ports
2077 * @cfg: Internal structure associated with the host.
2079 * This routine determines the number of AFU ports by converting the global
2080 * port selection mask. The converted value is only valid following an AFU
2081 * reset (explicit or power-on). This routine must be invoked shortly after
2082 * mapping as other routines are dependent on the number of ports during the
2083 * initialization sequence.
2085 * To support legacy AFUs that might not have reflected an initial global
2086 * port mask (value read is 0), default to the number of ports originally
2087 * supported by the cxlflash driver (2) before hardware with other port
2088 * offerings was introduced.
2090 static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2092 struct afu *afu = cfg->afu;
2093 struct device *dev = &cfg->dev->dev;
2094 u64 port_mask;
2095 int num_fc_ports = LEGACY_FC_PORTS;
2097 port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2098 if (port_mask != 0ULL)
2099 num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
2101 dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2102 __func__, port_mask, num_fc_ports);
2104 cfg->num_fc_ports = num_fc_ports;
2105 cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
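/*
 * Worked example of the conversion above (illustrative values): a
 * 2-port AFU reflects port_mask = 0x3, so ilog2(0x3) + 1 = 2; a 4-port
 * AFU reflects 0xf, so ilog2(0xf) + 1 = 4. A mask of 0 falls back to
 * LEGACY_FC_PORTS.
 */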
2109 * init_afu() - setup as master context and start AFU
2110 * @cfg: Internal structure associated with the host.
2112 * This routine is a higher level of control for configuring the
2113 * AFU on probe and reset paths.
2115 * Return: 0 on success, -errno on failure
2117 static int init_afu(struct cxlflash_cfg *cfg)
2119 u64 reg;
2120 int rc = 0;
2121 struct afu *afu = cfg->afu;
2122 struct device *dev = &cfg->dev->dev;
2123 struct hwq *hwq;
2124 int i;
2126 cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2128 mutex_init(&afu->sync_active);
2129 afu->num_hwqs = afu->desired_hwqs;
2130 for (i = 0; i < afu->num_hwqs; i++) {
2131 rc = init_mc(cfg, i);
2132 if (rc) {
2133 dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2134 __func__, rc, i);
2135 goto err1;
2139 /* Map the entire MMIO space of the AFU using the first context */
2140 hwq = get_hwq(afu, PRIMARY_HWQ);
2141 afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2142 if (!afu->afu_map) {
2143 dev_err(dev, "%s: psa_map failed\n", __func__);
2144 rc = -ENOMEM;
2145 goto err1;
2148 /* No byte reverse on reading afu_version, else the string will be backwards */
2149 reg = readq(&afu->afu_map->global.regs.afu_version);
2150 memcpy(afu->version, &reg, sizeof(reg));
2151 afu->interface_version =
2152 readq_be(&afu->afu_map->global.regs.interface_version);
2153 if ((afu->interface_version + 1) == 0) { /* all-FFs version => back level AFU */
2154 dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2155 "interface version %016llx\n", afu->version,
2156 afu->interface_version);
2157 rc = -EINVAL;
2158 goto err1;
2161 if (afu_is_sq_cmd_mode(afu)) {
2162 afu->send_cmd = send_cmd_sq;
2163 afu->context_reset = context_reset_sq;
2164 } else {
2165 afu->send_cmd = send_cmd_ioarrin;
2166 afu->context_reset = context_reset_ioarrin;
2169 dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2170 afu->version, afu->interface_version);
2172 get_num_afu_ports(cfg);
2174 rc = start_afu(cfg);
2175 if (rc) {
2176 dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2177 goto err1;
2180 afu_err_intr_init(cfg->afu);
2181 for (i = 0; i < afu->num_hwqs; i++) {
2182 hwq = get_hwq(afu, i);
2184 hwq->room = readq_be(&hwq->host_map->cmd_room);
2187 /* Restore the LUN mappings */
2188 cxlflash_restore_luntable(cfg);
2189 out:
2190 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2191 return rc;
2193 err1:
2194 for (i = afu->num_hwqs - 1; i >= 0; i--) {
2195 term_intr(cfg, UNMAP_THREE, i);
2196 term_mc(cfg, i);
2198 goto out;
2202 * afu_reset() - resets the AFU
2203 * @cfg: Internal structure associated with the host.
2205 * Return: 0 on success, -errno on failure
2207 static int afu_reset(struct cxlflash_cfg *cfg)
2209 struct device *dev = &cfg->dev->dev;
2210 int rc = 0;
2212 /* Stop the context before the reset. Since the context is
2213 * no longer available, restart it after the reset is complete.
2215 term_afu(cfg);
2217 rc = init_afu(cfg);
2219 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2220 return rc;
2224 * drain_ioctls() - wait until all currently executing ioctls have completed
2225 * @cfg: Internal structure associated with the host.
2227 * Obtain write access to read/write semaphore that wraps ioctl
2228 * handling to 'drain' ioctls currently executing.
2230 static void drain_ioctls(struct cxlflash_cfg *cfg)
2232 down_write(&cfg->ioctl_rwsem);
2233 up_write(&cfg->ioctl_rwsem);
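/*
 * Sketch of the reader side this drain pairs with, as used by the
 * ioctl paths in this driver (e.g. cxlflash_chr_ioctl() below): each
 * ioctl thread brackets its execution with
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	...
 *	up_read(&cfg->ioctl_rwsem);
 *
 * so the write lock above is granted only once all in-flight ioctls
 * have released their read holds.
 */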
2237 * cxlflash_async_reset_host() - asynchronous host reset handler
2238 * @data: Private data provided while scheduling reset.
2239 * @cookie: Cookie that can be used for checkpointing.
2241 static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2243 struct cxlflash_cfg *cfg = data;
2244 struct device *dev = &cfg->dev->dev;
2245 int rc = 0;
2247 if (cfg->state != STATE_RESET) {
2248 dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2249 __func__, cfg->state);
2250 goto out;
2253 drain_ioctls(cfg);
2254 cxlflash_mark_contexts_error(cfg);
2255 rc = afu_reset(cfg);
2256 if (rc)
2257 cfg->state = STATE_FAILTERM;
2258 else
2259 cfg->state = STATE_NORMAL;
2260 wake_up_all(&cfg->reset_waitq);
2262 out:
2263 scsi_unblock_requests(cfg->host);
2267 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2268 * @cfg: Internal structure associated with the host.
2270 static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2272 struct device *dev = &cfg->dev->dev;
2274 if (cfg->state != STATE_NORMAL) {
2275 dev_dbg(dev, "%s: Not performing reset state=%d\n",
2276 __func__, cfg->state);
2277 return;
2280 cfg->state = STATE_RESET;
2281 scsi_block_requests(cfg->host);
2282 cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2283 cfg);
2287 * send_afu_cmd() - builds and sends an internal AFU command
2288 * @afu: AFU associated with the host.
2289 * @rcb: Pre-populated IOARCB describing command to send.
2291 * The AFU can only take one internal AFU command at a time. This limitation is
2292 * enforced by using a mutex to provide exclusive access to the AFU during the
2293 * operation. This design point requires that calling threads not run in
2294 * interrupt context, as they may sleep during concurrent AFU operations.
2296 * The command status is optionally passed back to the caller when the caller
2297 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2299 * Return:
2300 * 0 on success, -errno on failure
2302 static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2304 struct cxlflash_cfg *cfg = afu->parent;
2305 struct device *dev = &cfg->dev->dev;
2306 struct afu_cmd *cmd = NULL;
2307 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2308 ulong lock_flags;
2309 char *buf = NULL;
2310 int rc = 0;
2311 int nretry = 0;
2313 if (cfg->state != STATE_NORMAL) {
2314 dev_dbg(dev, "%s: Sync not required state=%u\n",
2315 __func__, cfg->state);
2316 return 0;
2319 mutex_lock(&afu->sync_active);
2320 atomic_inc(&afu->cmds_active);
2321 buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2322 if (unlikely(!buf)) {
2323 dev_err(dev, "%s: no memory for command\n", __func__);
2324 rc = -ENOMEM;
2325 goto out;
2328 cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2330 retry:
2331 memset(cmd, 0, sizeof(*cmd));
2332 memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2333 INIT_LIST_HEAD(&cmd->queue);
2334 init_completion(&cmd->cevent);
2335 cmd->parent = afu;
2336 cmd->hwq_index = hwq->index;
2337 cmd->rcb.ctx_id = hwq->ctx_hndl;
2339 dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2340 __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2342 rc = afu->send_cmd(afu, cmd);
2343 if (unlikely(rc)) {
2344 rc = -ENOBUFS;
2345 goto out;
2348 rc = wait_resp(afu, cmd);
2349 switch (rc) {
2350 case -ETIMEDOUT:
2351 rc = afu->context_reset(hwq);
2352 if (rc) {
2353 /* Delete the command from pending_cmds list */
2354 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
2355 list_del(&cmd->list);
2356 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
2358 cxlflash_schedule_async_reset(cfg);
2359 break;
2361 /* fall through - to retry */
2362 case -EAGAIN:
2363 if (++nretry < 2)
2364 goto retry;
2365 /* fall through - to exit */
2366 default:
2367 break;
2370 if (rcb->ioasa)
2371 *rcb->ioasa = cmd->sa;
2372 out:
2373 atomic_dec(&afu->cmds_active);
2374 mutex_unlock(&afu->sync_active);
2375 kfree(buf);
2376 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2377 return rc;
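/*
 * Hypothetical caller sketch (illustrative, not in-tree code); see
 * cxlflash_afu_sync() below for the canonical usage:
 *
 *	struct sisl_ioarcb rcb = { 0 };
 *	struct sisl_ioasa asa = { 0 };
 *
 *	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
 *	rcb.msi = SISL_MSI_RRQ_UPDATED;
 *	rcb.timeout = MC_AFU_SYNC_TIMEOUT;
 *	rcb.ioasa = &asa;
 *	rcb.cdb[0] = SISL_AFU_CMD_SYNC;
 *	rc = send_afu_cmd(afu, &rcb);
 *
 * Must be called from a context that can sleep; asa.ioasc holds the
 * completion status when rcb.ioasa is populated.
 */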
2381 * cxlflash_afu_sync() - builds and sends an AFU sync command
2382 * @afu: AFU associated with the host.
2383 * @ctx: Identifies context requesting sync.
2384 * @res: Identifies resource requesting sync.
2385 * @mode: Type of sync to issue (lightweight, heavyweight, global).
2387 * AFU sync operations are only necessary and allowed when the device is
2388 * operating normally. When not operating normally, sync requests can occur as
2389 * part of cleaning up resources associated with an adapter prior to removal.
2390 * In this scenario, these requests are simply ignored (safe due to the AFU
2391 * going away).
2393 * Return:
2394 * 0 on success, -errno on failure
2396 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2398 struct cxlflash_cfg *cfg = afu->parent;
2399 struct device *dev = &cfg->dev->dev;
2400 struct sisl_ioarcb rcb = { 0 };
2402 dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2403 __func__, afu, ctx, res, mode);
2405 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2406 rcb.msi = SISL_MSI_RRQ_UPDATED;
2407 rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2409 rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2410 rcb.cdb[1] = mode;
2411 put_unaligned_be16(ctx, &rcb.cdb[2]);
2412 put_unaligned_be32(res, &rcb.cdb[4]);
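/*
 * Resulting CDB layout (big endian), per the assignments above:
 * byte 0 opcode (SISL_AFU_CMD_SYNC), byte 1 mode, bytes 2-3 ctx_hndl,
 * bytes 4-7 res_hndl.
 */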
2414 return send_afu_cmd(afu, &rcb);
2418 * cxlflash_eh_abort_handler() - abort a SCSI command
2419 * @scp: SCSI command to abort.
2421 * CXL Flash devices do not support a single command abort. Reset the context
2422 * as per SISLite specification. Flush any pending commands in the hardware
2423 * queue before the reset.
2425 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2427 static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2429 int rc = FAILED;
2430 struct Scsi_Host *host = scp->device->host;
2431 struct cxlflash_cfg *cfg = shost_priv(host);
2432 struct afu_cmd *cmd = sc_to_afuc(scp);
2433 struct device *dev = &cfg->dev->dev;
2434 struct afu *afu = cfg->afu;
2435 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2437 dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2438 "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2439 scp->device->channel, scp->device->id, scp->device->lun,
2440 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2441 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2442 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2443 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2445 /* When the state is not normal, another reset/reload is in progress.
2446 * Return failed and the mid-layer will invoke host reset handler.
2448 if (cfg->state != STATE_NORMAL) {
2449 dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2450 __func__, cfg->state);
2451 goto out;
2454 rc = afu->context_reset(hwq);
2455 if (unlikely(rc))
2456 goto out;
2458 rc = SUCCESS;
2460 out:
2461 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2462 return rc;
2466 * cxlflash_eh_device_reset_handler() - reset a single LUN
2467 * @scp: SCSI command to send.
2469 * Return:
2470 * SUCCESS as defined in scsi/scsi.h
2471 * FAILED as defined in scsi/scsi.h
2473 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2475 int rc = SUCCESS;
2476 struct scsi_device *sdev = scp->device;
2477 struct Scsi_Host *host = sdev->host;
2478 struct cxlflash_cfg *cfg = shost_priv(host);
2479 struct device *dev = &cfg->dev->dev;
2480 int rcr = 0;
2482 dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2483 host->host_no, sdev->channel, sdev->id, sdev->lun);
2484 retry:
2485 switch (cfg->state) {
2486 case STATE_NORMAL:
2487 rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2488 if (unlikely(rcr))
2489 rc = FAILED;
2490 break;
2491 case STATE_RESET:
2492 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2493 goto retry;
2494 default:
2495 rc = FAILED;
2496 break;
2499 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2500 return rc;
2504 * cxlflash_eh_host_reset_handler() - reset the host adapter
2505 * @scp: SCSI command from stack identifying host.
2507 * Following a reset, the state is evaluated again in case an EEH occurred
2508 * during the reset. In such a scenario, the host reset will either yield
2509 * until the EEH recovery is complete or return success or failure based
2510 * upon the current device state.
2512 * Return:
2513 * SUCCESS as defined in scsi/scsi.h
2514 * FAILED as defined in scsi/scsi.h
2516 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2518 int rc = SUCCESS;
2519 int rcr = 0;
2520 struct Scsi_Host *host = scp->device->host;
2521 struct cxlflash_cfg *cfg = shost_priv(host);
2522 struct device *dev = &cfg->dev->dev;
2524 dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2526 switch (cfg->state) {
2527 case STATE_NORMAL:
2528 cfg->state = STATE_RESET;
2529 drain_ioctls(cfg);
2530 cxlflash_mark_contexts_error(cfg);
2531 rcr = afu_reset(cfg);
2532 if (rcr) {
2533 rc = FAILED;
2534 cfg->state = STATE_FAILTERM;
2535 } else
2536 cfg->state = STATE_NORMAL;
2537 wake_up_all(&cfg->reset_waitq);
2538 ssleep(1);
2539 /* fall through */
2540 case STATE_RESET:
2541 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2542 if (cfg->state == STATE_NORMAL)
2543 break;
2544 /* fall through */
2545 default:
2546 rc = FAILED;
2547 break;
2550 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2551 return rc;
2555 * cxlflash_change_queue_depth() - change the queue depth for the device
2556 * @sdev: SCSI device destined for queue depth change.
2557 * @qdepth: Requested queue depth value to set.
2559 * The requested queue depth is capped to the maximum supported value.
2561 * Return: The actual queue depth set.
2563 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2566 if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2567 qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2569 scsi_change_queue_depth(sdev, qdepth);
2570 return sdev->queue_depth;
2574 * cxlflash_show_port_status() - queries and presents the current port status
2575 * @port: Desired port for status reporting.
2576 * @cfg: Internal structure associated with the host.
2577 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2579 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2581 static ssize_t cxlflash_show_port_status(u32 port,
2582 struct cxlflash_cfg *cfg,
2583 char *buf)
2585 struct device *dev = &cfg->dev->dev;
2586 char *disp_status;
2587 u64 status;
2588 __be64 __iomem *fc_port_regs;
2590 WARN_ON(port >= MAX_FC_PORTS);
2592 if (port >= cfg->num_fc_ports) {
2593 dev_info(dev, "%s: Port %d not supported on this card.\n",
2594 __func__, port);
2595 return -EINVAL;
2598 fc_port_regs = get_fc_port_regs(cfg, port);
2599 status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2600 status &= FC_MTIP_STATUS_MASK;
2602 if (status == FC_MTIP_STATUS_ONLINE)
2603 disp_status = "online";
2604 else if (status == FC_MTIP_STATUS_OFFLINE)
2605 disp_status = "offline";
2606 else
2607 disp_status = "unknown";
2609 return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2613 * port0_show() - queries and presents the current status of port 0
2614 * @dev: Generic device associated with the host owning the port.
2615 * @attr: Device attribute representing the port.
2616 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2618 * Return: The size of the ASCII string returned in @buf.
2620 static ssize_t port0_show(struct device *dev,
2621 struct device_attribute *attr,
2622 char *buf)
2624 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2626 return cxlflash_show_port_status(0, cfg, buf);
2630 * port1_show() - queries and presents the current status of port 1
2631 * @dev: Generic device associated with the host owning the port.
2632 * @attr: Device attribute representing the port.
2633 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2635 * Return: The size of the ASCII string returned in @buf.
2637 static ssize_t port1_show(struct device *dev,
2638 struct device_attribute *attr,
2639 char *buf)
2641 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2643 return cxlflash_show_port_status(1, cfg, buf);
2647 * port2_show() - queries and presents the current status of port 2
2648 * @dev: Generic device associated with the host owning the port.
2649 * @attr: Device attribute representing the port.
2650 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2652 * Return: The size of the ASCII string returned in @buf.
2654 static ssize_t port2_show(struct device *dev,
2655 struct device_attribute *attr,
2656 char *buf)
2658 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2660 return cxlflash_show_port_status(2, cfg, buf);
2664 * port3_show() - queries and presents the current status of port 3
2665 * @dev: Generic device associated with the host owning the port.
2666 * @attr: Device attribute representing the port.
2667 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2669 * Return: The size of the ASCII string returned in @buf.
2671 static ssize_t port3_show(struct device *dev,
2672 struct device_attribute *attr,
2673 char *buf)
2675 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2677 return cxlflash_show_port_status(3, cfg, buf);
2681 * lun_mode_show() - presents the current LUN mode of the host
2682 * @dev: Generic device associated with the host.
2683 * @attr: Device attribute representing the LUN mode.
2684 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2686 * Return: The size of the ASCII string returned in @buf.
2688 static ssize_t lun_mode_show(struct device *dev,
2689 struct device_attribute *attr, char *buf)
2691 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2692 struct afu *afu = cfg->afu;
2694 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2698 * lun_mode_store() - sets the LUN mode of the host
2699 * @dev: Generic device associated with the host.
2700 * @attr: Device attribute representing the LUN mode.
2701 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2702 * @count: Length of data residing in @buf.
2704 * The CXL Flash AFU supports a dummy LUN mode where the external
2705 * links and storage are not required. Space on the FPGA is used
2706 * to create 1 or 2 small LUNs which are presented to the system
2707 * as if they were a normal storage device. This feature is useful
2708 * during development and also provides manufacturing with a way
2709 * to test the AFU without an actual device.
2711 * 0 = external LUN[s] (default)
2712 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2713 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2714 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2715 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2717 * Return: The number of bytes consumed from @buf (@count).
2719 static ssize_t lun_mode_store(struct device *dev,
2720 struct device_attribute *attr,
2721 const char *buf, size_t count)
2723 struct Scsi_Host *shost = class_to_shost(dev);
2724 struct cxlflash_cfg *cfg = shost_priv(shost);
2725 struct afu *afu = cfg->afu;
2726 int rc;
2727 u32 lun_mode;
2729 rc = kstrtouint(buf, 10, &lun_mode);
2730 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2731 afu->internal_lun = lun_mode;
2734 * When configured for internal LUN, there is only one channel,
2735 * channel number 0, else there will be one less than the number
2736 * of fc ports for this card.
2738 if (afu->internal_lun)
2739 shost->max_channel = 0;
2740 else
2741 shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2743 afu_reset(cfg);
2744 scsi_scan_host(cfg->host);
2747 return count;
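/*
 * Example usage from userspace (hypothetical host number):
 *
 *	echo 1 > /sys/class/scsi_host/host0/lun_mode
 *
 * switches to a single internal 64K/512B LUN; writing 0 restores the
 * external LUNs. Per the code above, each change triggers an AFU reset
 * followed by a host rescan.
 */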
2751 * ioctl_version_show() - presents the current ioctl version of the host
2752 * @dev: Generic device associated with the host.
2753 * @attr: Device attribute representing the ioctl version.
2754 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
2756 * Return: The size of the ASCII string returned in @buf.
2758 static ssize_t ioctl_version_show(struct device *dev,
2759 struct device_attribute *attr, char *buf)
2761 ssize_t bytes = 0;
2763 bytes = scnprintf(buf, PAGE_SIZE,
2764 "disk: %u\n", DK_CXLFLASH_VERSION_0);
2765 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2766 "host: %u\n", HT_CXLFLASH_VERSION_0);
2768 return bytes;
2772 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2773 * @port: Desired port for LUN table reporting.
2774 * @cfg: Internal structure associated with the host.
2775 * @buf: Buffer of length PAGE_SIZE to report back the port LUN table in ASCII.
2777 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2779 static ssize_t cxlflash_show_port_lun_table(u32 port,
2780 struct cxlflash_cfg *cfg,
2781 char *buf)
2783 struct device *dev = &cfg->dev->dev;
2784 __be64 __iomem *fc_port_luns;
2785 int i;
2786 ssize_t bytes = 0;
2788 WARN_ON(port >= MAX_FC_PORTS);
2790 if (port >= cfg->num_fc_ports) {
2791 dev_info(dev, "%s: Port %d not supported on this card.\n",
2792 __func__, port);
2793 return -EINVAL;
2796 fc_port_luns = get_fc_port_luns(cfg, port);
2798 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2799 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2800 "%03d: %016llx\n",
2801 i, readq_be(&fc_port_luns[i]));
2802 return bytes;
2806 * port0_lun_table_show() - presents the current LUN table of port 0
2807 * @dev: Generic device associated with the host owning the port.
2808 * @attr: Device attribute representing the port.
2809 * @buf: Buffer of length PAGE_SIZE to report back the port LUN table in ASCII.
2811 * Return: The size of the ASCII string returned in @buf.
2813 static ssize_t port0_lun_table_show(struct device *dev,
2814 struct device_attribute *attr,
2815 char *buf)
2817 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2819 return cxlflash_show_port_lun_table(0, cfg, buf);
2823 * port1_lun_table_show() - presents the current LUN table of port 1
2824 * @dev: Generic device associated with the host owning the port.
2825 * @attr: Device attribute representing the port.
2826 * @buf: Buffer of length PAGE_SIZE to report back the port LUN table in ASCII.
2828 * Return: The size of the ASCII string returned in @buf.
2830 static ssize_t port1_lun_table_show(struct device *dev,
2831 struct device_attribute *attr,
2832 char *buf)
2834 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2836 return cxlflash_show_port_lun_table(1, cfg, buf);
2840 * port2_lun_table_show() - presents the current LUN table of port 2
2841 * @dev: Generic device associated with the host owning the port.
2842 * @attr: Device attribute representing the port.
2843 * @buf: Buffer of length PAGE_SIZE to report back the port LUN table in ASCII.
2845 * Return: The size of the ASCII string returned in @buf.
2847 static ssize_t port2_lun_table_show(struct device *dev,
2848 struct device_attribute *attr,
2849 char *buf)
2851 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2853 return cxlflash_show_port_lun_table(2, cfg, buf);
2857 * port3_lun_table_show() - presents the current LUN table of port 3
2858 * @dev: Generic device associated with the host owning the port.
2859 * @attr: Device attribute representing the port.
2860 * @buf: Buffer of length PAGE_SIZE to report back the port LUN table in ASCII.
2862 * Return: The size of the ASCII string returned in @buf.
2864 static ssize_t port3_lun_table_show(struct device *dev,
2865 struct device_attribute *attr,
2866 char *buf)
2868 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2870 return cxlflash_show_port_lun_table(3, cfg, buf);
2874 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2875 * @dev: Generic device associated with the host.
2876 * @attr: Device attribute representing the IRQ poll weight.
2877 * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
2878 * weight in ASCII.
2880 * An IRQ poll weight of 0 indicates polling is disabled.
2882 * Return: The size of the ASCII string returned in @buf.
2884 static ssize_t irqpoll_weight_show(struct device *dev,
2885 struct device_attribute *attr, char *buf)
2887 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2888 struct afu *afu = cfg->afu;
2890 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2894 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2895 * @dev: Generic device associated with the host.
2896 * @attr: Device attribute representing the IRQ poll weight.
2897 * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
2898 * weight in ASCII.
2899 * @count: Length of data residing in @buf.
2901 * An IRQ poll weight of 0 indicates polling is disabled.
2903 * Return: @count on success, -errno on failure.
2905 static ssize_t irqpoll_weight_store(struct device *dev,
2906 struct device_attribute *attr,
2907 const char *buf, size_t count)
2909 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2910 struct device *cfgdev = &cfg->dev->dev;
2911 struct afu *afu = cfg->afu;
2912 struct hwq *hwq;
2913 u32 weight;
2914 int rc, i;
2916 rc = kstrtouint(buf, 10, &weight);
2917 if (rc)
2918 return -EINVAL;
2920 if (weight > 256) {
2921 dev_info(cfgdev,
2922 "Invalid IRQ poll weight. It must be 256 or less.\n");
2923 return -EINVAL;
2926 if (weight == afu->irqpoll_weight) {
2927 dev_info(cfgdev,
2928 "Current IRQ poll weight has the same weight.\n");
2929 return -EINVAL;
2932 if (afu_is_irqpoll_enabled(afu)) {
2933 for (i = 0; i < afu->num_hwqs; i++) {
2934 hwq = get_hwq(afu, i);
2936 irq_poll_disable(&hwq->irqpoll);
2940 afu->irqpoll_weight = weight;
2942 if (weight > 0) {
2943 for (i = 0; i < afu->num_hwqs; i++) {
2944 hwq = get_hwq(afu, i);
2946 irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2950 return count;
2954 * num_hwqs_show() - presents the number of hardware queues for the host
2955 * @dev: Generic device associated with the host.
2956 * @attr: Device attribute representing the number of hardware queues.
2957 * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
2958 * queues in ASCII.
2960 * Return: The size of the ASCII string returned in @buf.
2962 static ssize_t num_hwqs_show(struct device *dev,
2963 struct device_attribute *attr, char *buf)
2965 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2966 struct afu *afu = cfg->afu;
2968 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2972 * num_hwqs_store() - sets the number of hardware queues for the host
2973 * @dev: Generic device associated with the host.
2974 * @attr: Device attribute representing the number of hardware queues.
2975 * @buf: Buffer of length PAGE_SIZE containing the number of hardware
2976 * queues in ASCII.
2977 * @count: Length of data residing in @buf.
2979 * n > 0: num_hwqs = n
2980 * n = 0: num_hwqs = num_online_cpus()
2981 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2983 * Return: @count on success, -errno on failure.
2985 static ssize_t num_hwqs_store(struct device *dev,
2986 struct device_attribute *attr,
2987 const char *buf, size_t count)
2989 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2990 struct afu *afu = cfg->afu;
2991 int rc;
2992 int nhwqs, num_hwqs;
2994 rc = kstrtoint(buf, 10, &nhwqs);
2995 if (rc)
2996 return -EINVAL;
2998 if (nhwqs >= 1)
2999 num_hwqs = nhwqs;
3000 else if (nhwqs == 0)
3001 num_hwqs = num_online_cpus();
3002 else
3003 num_hwqs = num_online_cpus() / abs(nhwqs);
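/*
 * Worked examples of the mapping above, assuming an 8-CPU host:
 * n=3 requests 3 HWQs, n=0 requests 8 and n=-2 requests 4. The
 * result is capped at CXLFLASH_MAX_HWQS below.
 */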
3005 afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
3006 WARN_ON_ONCE(afu->desired_hwqs == 0);
3008 retry:
3009 switch (cfg->state) {
3010 case STATE_NORMAL:
3011 cfg->state = STATE_RESET;
3012 drain_ioctls(cfg);
3013 cxlflash_mark_contexts_error(cfg);
3014 rc = afu_reset(cfg);
3015 if (rc)
3016 cfg->state = STATE_FAILTERM;
3017 else
3018 cfg->state = STATE_NORMAL;
3019 wake_up_all(&cfg->reset_waitq);
3020 break;
3021 case STATE_RESET:
3022 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
3023 if (cfg->state == STATE_NORMAL)
3024 goto retry;
3025 /* else, fall through */
3026 default:
3027 /* Ideally should not happen */
3028 dev_err(dev, "%s: Device is not ready, state=%d\n",
3029 __func__, cfg->state);
3030 break;
3033 return count;
3036 static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3039 * hwq_mode_show() - presents the HWQ steering mode for the host
3040 * @dev: Generic device associated with the host.
3041 * @attr: Device attribute representing the HWQ steering mode.
3042 * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
3043 * as a character string.
3045 * Return: The size of the ASCII string returned in @buf.
3047 static ssize_t hwq_mode_show(struct device *dev,
3048 struct device_attribute *attr, char *buf)
3050 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3051 struct afu *afu = cfg->afu;
3053 return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3057 * hwq_mode_store() - sets the HWQ steering mode for the host
3058 * @dev: Generic device associated with the host.
3059 * @attr: Device attribute representing the HWQ steering mode.
3060 * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
3061 * as a character string.
3062 * @count: Length of data residing in @buf.
3064 * rr = Round-Robin
3065 * tag = Block MQ Tagging
3066 * cpu = CPU Affinity
3068 * Return: @count on success, -errno on failure.
3070 static ssize_t hwq_mode_store(struct device *dev,
3071 struct device_attribute *attr,
3072 const char *buf, size_t count)
3074 struct Scsi_Host *shost = class_to_shost(dev);
3075 struct cxlflash_cfg *cfg = shost_priv(shost);
3076 struct device *cfgdev = &cfg->dev->dev;
3077 struct afu *afu = cfg->afu;
3078 int i;
3079 u32 mode = MAX_HWQ_MODE;
3081 for (i = 0; i < MAX_HWQ_MODE; i++) {
3082 if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3083 mode = i;
3084 break;
3088 if (mode >= MAX_HWQ_MODE) {
3089 dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3090 return -EINVAL;
3093 afu->hwq_mode = mode;
3095 return count;
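/*
 * Example usage from userspace (hypothetical host number):
 *
 *	echo cpu > /sys/class/scsi_host/host0/hwq_mode
 *
 * Note the prefix match above: any string beginning with "rr", "tag"
 * or "cpu" (including a trailing newline) selects that mode.
 */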
3099 * mode_show() - presents the current mode of the device
3100 * @dev: Generic device associated with the device.
3101 * @attr: Device attribute representing the device mode.
3102 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3104 * Return: The size of the ASCII string returned in @buf.
3106 static ssize_t mode_show(struct device *dev,
3107 struct device_attribute *attr, char *buf)
3109 struct scsi_device *sdev = to_scsi_device(dev);
3111 return scnprintf(buf, PAGE_SIZE, "%s\n",
3112 sdev->hostdata ? "superpipe" : "legacy");
3116 * Host attributes
3118 static DEVICE_ATTR_RO(port0);
3119 static DEVICE_ATTR_RO(port1);
3120 static DEVICE_ATTR_RO(port2);
3121 static DEVICE_ATTR_RO(port3);
3122 static DEVICE_ATTR_RW(lun_mode);
3123 static DEVICE_ATTR_RO(ioctl_version);
3124 static DEVICE_ATTR_RO(port0_lun_table);
3125 static DEVICE_ATTR_RO(port1_lun_table);
3126 static DEVICE_ATTR_RO(port2_lun_table);
3127 static DEVICE_ATTR_RO(port3_lun_table);
3128 static DEVICE_ATTR_RW(irqpoll_weight);
3129 static DEVICE_ATTR_RW(num_hwqs);
3130 static DEVICE_ATTR_RW(hwq_mode);
3132 static struct device_attribute *cxlflash_host_attrs[] = {
3133 &dev_attr_port0,
3134 &dev_attr_port1,
3135 &dev_attr_port2,
3136 &dev_attr_port3,
3137 &dev_attr_lun_mode,
3138 &dev_attr_ioctl_version,
3139 &dev_attr_port0_lun_table,
3140 &dev_attr_port1_lun_table,
3141 &dev_attr_port2_lun_table,
3142 &dev_attr_port3_lun_table,
3143 &dev_attr_irqpoll_weight,
3144 &dev_attr_num_hwqs,
3145 &dev_attr_hwq_mode,
3146 NULL
3150 * Device attributes
3152 static DEVICE_ATTR_RO(mode);
3154 static struct device_attribute *cxlflash_dev_attrs[] = {
3155 &dev_attr_mode,
3156 NULL
3160 * Host template
3162 static struct scsi_host_template driver_template = {
3163 .module = THIS_MODULE,
3164 .name = CXLFLASH_ADAPTER_NAME,
3165 .info = cxlflash_driver_info,
3166 .ioctl = cxlflash_ioctl,
3167 .proc_name = CXLFLASH_NAME,
3168 .queuecommand = cxlflash_queuecommand,
3169 .eh_abort_handler = cxlflash_eh_abort_handler,
3170 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3171 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3172 .change_queue_depth = cxlflash_change_queue_depth,
3173 .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3174 .can_queue = CXLFLASH_MAX_CMDS,
3175 .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3176 .this_id = -1,
3177 .sg_tablesize = 1, /* No scatter gather support */
3178 .max_sectors = CXLFLASH_MAX_SECTORS,
3179 .shost_attrs = cxlflash_host_attrs,
3180 .sdev_attrs = cxlflash_dev_attrs,
3184 * Device dependent values
3186 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3187 CXLFLASH_WWPN_VPD_REQUIRED };
3188 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3189 CXLFLASH_NOTIFY_SHUTDOWN };
3190 static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3191 (CXLFLASH_NOTIFY_SHUTDOWN |
3192 CXLFLASH_OCXL_DEV) };
3195 * PCI device binding table
3197 static struct pci_device_id cxlflash_pci_table[] = {
3198 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3200 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3202 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3207 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3210 * cxlflash_worker_thread() - work thread handler for the AFU
3211 * @work: Work structure contained within cxlflash associated with host.
3213 * Handles the following events:
3214 * - Link reset which cannot be performed on interrupt context due to
3215 * blocking up to a few seconds
3216 * - Rescan the host
3218 static void cxlflash_worker_thread(struct work_struct *work)
3220 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3221 work_q);
3222 struct afu *afu = cfg->afu;
3223 struct device *dev = &cfg->dev->dev;
3224 __be64 __iomem *fc_port_regs;
3225 int port;
3226 ulong lock_flags;
3228 /* Avoid MMIO if the device has failed */
3230 if (cfg->state != STATE_NORMAL)
3231 return;
3233 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3235 if (cfg->lr_state == LINK_RESET_REQUIRED) {
3236 port = cfg->lr_port;
3237 if (port < 0)
3238 dev_err(dev, "%s: invalid port index %d\n",
3239 __func__, port);
3240 else {
3241 spin_unlock_irqrestore(cfg->host->host_lock,
3242 lock_flags);
3244 /* The reset can block... */
3245 fc_port_regs = get_fc_port_regs(cfg, port);
3246 afu_link_reset(afu, port, fc_port_regs);
3247 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3250 cfg->lr_state = LINK_RESET_COMPLETE;
3253 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3255 if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3256 scsi_scan_host(cfg->host);
3260 * cxlflash_chr_open() - character device open handler
3261 * @inode: Device inode associated with this character device.
3262 * @file: File pointer for this device.
3264 * Only users with admin privileges are allowed to open the character device.
3266 * Return: 0 on success, -errno on failure
3268 static int cxlflash_chr_open(struct inode *inode, struct file *file)
3270 struct cxlflash_cfg *cfg;
3272 if (!capable(CAP_SYS_ADMIN))
3273 return -EACCES;
3275 cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3276 file->private_data = cfg;
3278 return 0;
3282 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3283 * @cmd: The host ioctl command to decode.
3285 * Return: A string identifying the decoded host ioctl.
3287 static char *decode_hioctl(unsigned int cmd)
3289 switch (cmd) {
3290 case HT_CXLFLASH_LUN_PROVISION:
3291 return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3294 return "UNKNOWN";
3298 * cxlflash_lun_provision() - host LUN provisioning handler
3299 * @cfg: Internal structure associated with the host.
3300 * @arg: Kernel copy of userspace ioctl data structure.
3302 * Return: 0 on success, -errno on failure
3304 static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3305 struct ht_cxlflash_lun_provision *lunprov)
3307 struct afu *afu = cfg->afu;
3308 struct device *dev = &cfg->dev->dev;
3309 struct sisl_ioarcb rcb;
3310 struct sisl_ioasa asa;
3311 __be64 __iomem *fc_port_regs;
3312 u16 port = lunprov->port;
3313 u16 scmd = lunprov->hdr.subcmd;
3314 u16 type;
3315 u64 reg;
3316 u64 size;
3317 u64 lun_id;
3318 int rc = 0;
3320 if (!afu_is_lun_provision(afu)) {
3321 rc = -ENOTSUPP;
3322 goto out;
3325 if (port >= cfg->num_fc_ports) {
3326 rc = -EINVAL;
3327 goto out;
3330 switch (scmd) {
3331 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3332 type = SISL_AFU_LUN_PROVISION_CREATE;
3333 size = lunprov->size;
3334 lun_id = 0;
3335 break;
3336 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3337 type = SISL_AFU_LUN_PROVISION_DELETE;
3338 size = 0;
3339 lun_id = lunprov->lun_id;
3340 break;
3341 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3342 fc_port_regs = get_fc_port_regs(cfg, port);
3344 reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3345 lunprov->max_num_luns = reg;
3346 reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3347 lunprov->cur_num_luns = reg;
3348 reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3349 lunprov->max_cap_port = reg;
3350 reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3351 lunprov->cur_cap_port = reg;
3353 goto out;
3354 default:
3355 rc = -EINVAL;
3356 goto out;
3359 memset(&rcb, 0, sizeof(rcb));
3360 memset(&asa, 0, sizeof(asa));
3361 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3362 rcb.lun_id = lun_id;
3363 rcb.msi = SISL_MSI_RRQ_UPDATED;
3364 rcb.timeout = MC_LUN_PROV_TIMEOUT;
3365 rcb.ioasa = &asa;
3367 rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3368 rcb.cdb[1] = type;
3369 rcb.cdb[2] = port;
3370 put_unaligned_be64(size, &rcb.cdb[8]);
3372 rc = send_afu_cmd(afu, &rcb);
3373 if (rc) {
3374 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3375 __func__, rc, asa.ioasc, asa.afu_extra);
3376 goto out;
3379 if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3380 lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3381 memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3383 out:
3384 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3385 return rc;
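/*
 * Hypothetical userspace sketch (illustrative only) querying port 0
 * through the character device created by init_chrdev() below:
 *
 *	struct ht_cxlflash_lun_provision lp = { 0 };
 *
 *	lp.hdr.version = HT_CXLFLASH_VERSION_0;
 *	lp.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT;
 *	lp.port = 0;
 *	rc = ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp);
 */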
3389 * cxlflash_afu_debug() - host AFU debug handler
3390 * @cfg: Internal structure associated with the host.
3391 * @arg: Kernel copy of userspace ioctl data structure.
3393 * For debug requests requiring a data buffer, always provide an aligned
3394 * (cache line) buffer to the AFU to appease any alignment requirements.
3396 * Return: 0 on success, -errno on failure
3398 static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3399 struct ht_cxlflash_afu_debug *afu_dbg)
3401 struct afu *afu = cfg->afu;
3402 struct device *dev = &cfg->dev->dev;
3403 struct sisl_ioarcb rcb;
3404 struct sisl_ioasa asa;
3405 char *buf = NULL;
3406 char *kbuf = NULL;
3407 void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3408 u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3409 u32 ulen = afu_dbg->data_len;
3410 bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3411 int rc = 0;
3413 if (!afu_is_afu_debug(afu)) {
3414 rc = -ENOTSUPP;
3415 goto out;
3418 if (ulen) {
3419 req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3421 if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3422 rc = -EINVAL;
3423 goto out;
3426 buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3427 if (unlikely(!buf)) {
3428 rc = -ENOMEM;
3429 goto out;
3432 kbuf = PTR_ALIGN(buf, cache_line_size());
3434 if (is_write) {
3435 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3437 if (copy_from_user(kbuf, ubuf, ulen)) {
3438 rc = -EFAULT;
3439 goto out;
3444 memset(&rcb, 0, sizeof(rcb));
3445 memset(&asa, 0, sizeof(asa));
3447 rcb.req_flags = req_flags;
3448 rcb.msi = SISL_MSI_RRQ_UPDATED;
3449 rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3450 rcb.ioasa = &asa;
3452 if (ulen) {
3453 rcb.data_len = ulen;
3454 rcb.data_ea = (uintptr_t)kbuf;
3457 rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3458 memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3459 HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3461 rc = send_afu_cmd(afu, &rcb);
3462 if (rc) {
3463 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3464 __func__, rc, asa.ioasc, asa.afu_extra);
3465 goto out;
3468 if (ulen && !is_write) {
3469 if (copy_to_user(ubuf, kbuf, ulen))
3470 rc = -EFAULT;
3472 out:
3473 kfree(buf);
3474 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3475 return rc;
3479 * cxlflash_chr_ioctl() - character device IOCTL handler
3480 * @file: File pointer for this device.
3481 * @cmd: IOCTL command.
3482 * @arg: Userspace ioctl data structure.
3484 * A read/write semaphore is used to implement a 'drain' of currently
3485 * running ioctls. The read semaphore is taken at the beginning of each
3486 * ioctl thread and released upon concluding execution. Additionally the
3487 * semaphore should be released and then reacquired in any ioctl execution
3488 * path which will wait for an event to occur that is outside the scope of
3489 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3490 * a thread simply needs to acquire the write semaphore.
3492 * Return: 0 on success, -errno on failure
3494 static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3495 unsigned long arg)
3497 typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3499 struct cxlflash_cfg *cfg = file->private_data;
3500 struct device *dev = &cfg->dev->dev;
3501 char buf[sizeof(union cxlflash_ht_ioctls)];
3502 void __user *uarg = (void __user *)arg;
3503 struct ht_cxlflash_hdr *hdr;
3504 size_t size = 0;
3505 bool known_ioctl = false;
3506 int idx = 0;
3507 int rc = 0;
3508 hioctl do_ioctl = NULL;
3510 static const struct {
3511 size_t size;
3512 hioctl ioctl;
3513 } ioctl_tbl[] = { /* NOTE: order matters here */
3514 { sizeof(struct ht_cxlflash_lun_provision),
3515 (hioctl)cxlflash_lun_provision },
3516 { sizeof(struct ht_cxlflash_afu_debug),
3517 (hioctl)cxlflash_afu_debug },
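/*
 * Index math sketch for the table above: idx 0 corresponds to
 * HT_CXLFLASH_LUN_PROVISION and idx 1 to HT_CXLFLASH_AFU_DEBUG, the
 * next sequential ioctl number - hence "order matters" above.
 */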
3520 /* Hold read semaphore so we can drain if needed */
3521 down_read(&cfg->ioctl_rwsem);
3523 dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3524 __func__, cmd, idx, sizeof(ioctl_tbl));
3526 switch (cmd) {
3527 case HT_CXLFLASH_LUN_PROVISION:
3528 case HT_CXLFLASH_AFU_DEBUG:
3529 known_ioctl = true;
3530 idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3531 size = ioctl_tbl[idx].size;
3532 do_ioctl = ioctl_tbl[idx].ioctl;
3534 if (likely(do_ioctl))
3535 break;
3537 /* fall through */
3538 default:
3539 rc = -EINVAL;
3540 goto out;
3543 if (unlikely(copy_from_user(&buf, uarg, size))) {
3544 dev_err(dev, "%s: copy_from_user() fail "
3545 "size=%lu cmd=%d (%s) uarg=%p\n",
3546 __func__, size, cmd, decode_hioctl(cmd), uarg);
3547 rc = -EFAULT;
3548 goto out;
3551 hdr = (struct ht_cxlflash_hdr *)&buf;
3552 if (hdr->version != HT_CXLFLASH_VERSION_0) {
3553 dev_dbg(dev, "%s: Version %u not supported for %s\n",
3554 __func__, hdr->version, decode_hioctl(cmd));
3555 rc = -EINVAL;
3556 goto out;
3559 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3560 dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3561 rc = -EINVAL;
3562 goto out;
3565 rc = do_ioctl(cfg, (void *)&buf);
3566 if (likely(!rc))
3567 if (unlikely(copy_to_user(uarg, &buf, size))) {
3568 dev_err(dev, "%s: copy_to_user() fail "
3569 "size=%lu cmd=%d (%s) uarg=%p\n",
3570 __func__, size, cmd, decode_hioctl(cmd), uarg);
3571 rc = -EFAULT;
3574 /* fall through to exit */
3576 out:
3577 up_read(&cfg->ioctl_rwsem);
3578 if (unlikely(rc && known_ioctl))
3579 dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3580 __func__, decode_hioctl(cmd), cmd, rc);
3581 else
3582 dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3583 __func__, decode_hioctl(cmd), cmd, rc);
3584 return rc;
3588 * Character device file operations
3590 static const struct file_operations cxlflash_chr_fops = {
3591 .owner = THIS_MODULE,
3592 .open = cxlflash_chr_open,
3593 .unlocked_ioctl = cxlflash_chr_ioctl,
3594 .compat_ioctl = compat_ptr_ioctl,
3598 * init_chrdev() - initialize the character device for the host
3599 * @cfg: Internal structure associated with the host.
3601 * Return: 0 on success, -errno on failure
3603 static int init_chrdev(struct cxlflash_cfg *cfg)
3605 struct device *dev = &cfg->dev->dev;
3606 struct device *char_dev;
3607 dev_t devno;
3608 int minor;
3609 int rc = 0;
3611 minor = cxlflash_get_minor();
3612 if (unlikely(minor < 0)) {
3613 dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3614 rc = -ENOSPC;
3615 goto out;
3618 devno = MKDEV(cxlflash_major, minor);
3619 cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3621 rc = cdev_add(&cfg->cdev, devno, 1);
3622 if (rc) {
3623 dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3624 goto err1;
3627 char_dev = device_create(cxlflash_class, NULL, devno,
3628 NULL, "cxlflash%d", minor);
3629 if (IS_ERR(char_dev)) {
3630 rc = PTR_ERR(char_dev);
3631 dev_err(dev, "%s: device_create failed rc=%d\n",
3632 __func__, rc);
3633 goto err2;
3636 cfg->chardev = char_dev;
3637 out:
3638 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3639 return rc;
3640 err2:
3641 cdev_del(&cfg->cdev);
3642 err1:
3643 cxlflash_put_minor(minor);
3644 goto out;
3648 * cxlflash_probe() - PCI entry point to add host
3649 * @pdev: PCI device associated with the host.
3650 * @dev_id: PCI device id associated with device.
3652 * The device will initially start out in a 'probing' state and
3653 * transition to the 'normal' state at the end of a successful
3654 * probe. Should an EEH event occur during probe, the notification
3655 * thread (error_detected()) will wait until the probe handler
3656 * is nearly complete. At that time, the device will be moved to
3657 * a 'probed' state and the EEH thread woken up to drive the slot
3658 * reset and recovery (device moves to 'normal' state). Meanwhile,
3659 * the probe will be allowed to exit successfully.
3661 * Return: 0 on success, -errno on failure
3663 static int cxlflash_probe(struct pci_dev *pdev,
3664 const struct pci_device_id *dev_id)
3666 struct Scsi_Host *host;
3667 struct cxlflash_cfg *cfg = NULL;
3668 struct device *dev = &pdev->dev;
3669 struct dev_dependent_vals *ddv;
3670 int rc = 0;
3671 int k;
3673 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3674 __func__, pdev->irq);
3676 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3677 driver_template.max_sectors = ddv->max_sectors;
3679 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3680 if (!host) {
3681 dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3682 rc = -ENOMEM;
3683 goto out;
3686 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3687 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3688 host->unique_id = host->host_no;
3689 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3691 cfg = shost_priv(host);
3692 cfg->state = STATE_PROBING;
3693 cfg->host = host;
3694 rc = alloc_mem(cfg);
3695 if (rc) {
3696 dev_err(dev, "%s: alloc_mem failed\n", __func__);
3697 rc = -ENOMEM;
3698 scsi_host_put(cfg->host);
3699 goto out;
3702 cfg->init_state = INIT_STATE_NONE;
3703 cfg->dev = pdev;
3704 cfg->cxl_fops = cxlflash_cxl_fops;
3705 cfg->ops = cxlflash_assign_ops(ddv);
3706 WARN_ON_ONCE(!cfg->ops);
3709 * Promoted LUNs move to the top of the LUN table. The rest stay on
3710 * the bottom half. The bottom half grows from the end (index = 255),
3711 * whereas the top half grows from the beginning (index = 0).
3713 * Initialize the last LUN index for all possible ports.
3715 cfg->promote_lun_index = 0;
3717 for (k = 0; k < MAX_FC_PORTS; k++)
3718 cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3720 cfg->dev_id = (struct pci_device_id *)dev_id;
3722 init_waitqueue_head(&cfg->tmf_waitq);
3723 init_waitqueue_head(&cfg->reset_waitq);
3725 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3726 cfg->lr_state = LINK_RESET_INVALID;
3727 cfg->lr_port = -1;
3728 spin_lock_init(&cfg->tmf_slock);
3729 mutex_init(&cfg->ctx_tbl_list_mutex);
3730 mutex_init(&cfg->ctx_recovery_mutex);
3731 init_rwsem(&cfg->ioctl_rwsem);
3732 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3733 INIT_LIST_HEAD(&cfg->lluns);
3735 pci_set_drvdata(pdev, cfg);
3737 rc = init_pci(cfg);
3738 if (rc) {
3739 dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3740 goto out_remove;
3742 cfg->init_state = INIT_STATE_PCI;
3744 cfg->afu_cookie = cfg->ops->create_afu(pdev);
3745 if (unlikely(!cfg->afu_cookie)) {
3746 dev_err(dev, "%s: create_afu failed\n", __func__);
rc = -ENOMEM; /* ensure probe fails instead of returning 0 */
3747 goto out_remove;
3750 rc = init_afu(cfg);
3751 if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3752 dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3753 goto out_remove;
3755 cfg->init_state = INIT_STATE_AFU;
3757 rc = init_scsi(cfg);
3758 if (rc) {
3759 dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3760 goto out_remove;
3762 cfg->init_state = INIT_STATE_SCSI;
3764 rc = init_chrdev(cfg);
3765 if (rc) {
3766 dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3767 goto out_remove;
3769 cfg->init_state = INIT_STATE_CDEV;
3771 if (wq_has_sleeper(&cfg->reset_waitq)) {
3772 cfg->state = STATE_PROBED;
3773 wake_up_all(&cfg->reset_waitq);
3774 } else
3775 cfg->state = STATE_NORMAL;
3776 out:
3777 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3778 return rc;
3780 out_remove:
3781 cfg->state = STATE_PROBED;
3782 cxlflash_remove(pdev);
3783 goto out;
3787 * cxlflash_pci_error_detected() - called when a PCI error is detected
3788 * @pdev: PCI device struct.
3789 * @state: PCI channel state.
3791 * When an EEH occurs during an active reset, wait until the reset is
3792 * complete and then take action based upon the device state.
3794 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3796 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3797 pci_channel_state_t state)
3799 int rc = 0;
3800 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3801 struct device *dev = &cfg->dev->dev;
3803 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3805 switch (state) {
3806 case pci_channel_io_frozen:
3807 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3808 cfg->state != STATE_PROBING);
3809 if (cfg->state == STATE_FAILTERM)
3810 return PCI_ERS_RESULT_DISCONNECT;
3812 cfg->state = STATE_RESET;
3813 scsi_block_requests(cfg->host);
3814 drain_ioctls(cfg);
3815 rc = cxlflash_mark_contexts_error(cfg);
3816 if (unlikely(rc))
3817 dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3818 __func__, rc);
3819 term_afu(cfg);
3820 return PCI_ERS_RESULT_NEED_RESET;
3821 case pci_channel_io_perm_failure:
3822 cfg->state = STATE_FAILTERM;
3823 wake_up_all(&cfg->reset_waitq);
3824 scsi_unblock_requests(cfg->host);
3825 return PCI_ERS_RESULT_DISCONNECT;
3826 default:
3827 break;
3829 return PCI_ERS_RESULT_NEED_RESET;
3833 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3834 * @pdev: PCI device struct.
3836 * This routine is called by the pci error recovery code after the PCI
3837 * slot has been reset, just before we should resume normal operations.
3839 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3841 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3843 int rc = 0;
3844 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3845 struct device *dev = &cfg->dev->dev;
3847 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3849 rc = init_afu(cfg);
3850 if (unlikely(rc)) {
3851 dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3852 return PCI_ERS_RESULT_DISCONNECT;
3855 return PCI_ERS_RESULT_RECOVERED;
3859 * cxlflash_pci_resume() - called when normal operation can resume
3860 * @pdev: PCI device struct
3862 static void cxlflash_pci_resume(struct pci_dev *pdev)
3864 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3865 struct device *dev = &cfg->dev->dev;
3867 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3869 cfg->state = STATE_NORMAL;
3870 wake_up_all(&cfg->reset_waitq);
3871 scsi_unblock_requests(cfg->host);
3875 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3876 * @dev: Character device.
3877 * @mode: Mode that can be used to verify access.
3879 * Return: Allocated string describing the devtmpfs structure.
3881 static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3883 return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
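/*
 * Combined with device_create() in init_chrdev() above, this yields
 * nodes of the form /dev/cxlflash/cxlflash0, one per adapter minor.
 */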
3887 * cxlflash_class_init() - create character device class
3889 * Return: 0 on success, -errno on failure
3891 static int cxlflash_class_init(void)
3893 dev_t devno;
3894 int rc = 0;
3896 rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3897 if (unlikely(rc)) {
3898 pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3899 goto out;
3902 cxlflash_major = MAJOR(devno);
3904 cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3905 if (IS_ERR(cxlflash_class)) {
3906 rc = PTR_ERR(cxlflash_class);
3907 pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3908 goto err;
3911 cxlflash_class->devnode = cxlflash_devnode;
3912 out:
3913 pr_debug("%s: returning rc=%d\n", __func__, rc);
3914 return rc;
3915 err:
3916 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3917 goto out;
3921 * cxlflash_class_exit() - destroy character device class
3923 static void cxlflash_class_exit(void)
3925 dev_t devno = MKDEV(cxlflash_major, 0);
3927 class_destroy(cxlflash_class);
3928 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3931 static const struct pci_error_handlers cxlflash_err_handler = {
3932 .error_detected = cxlflash_pci_error_detected,
3933 .slot_reset = cxlflash_pci_slot_reset,
3934 .resume = cxlflash_pci_resume,
3938 * PCI device structure
3940 static struct pci_driver cxlflash_driver = {
3941 .name = CXLFLASH_NAME,
3942 .id_table = cxlflash_pci_table,
3943 .probe = cxlflash_probe,
3944 .remove = cxlflash_remove,
3945 .shutdown = cxlflash_remove,
3946 .err_handler = &cxlflash_err_handler,
3950 * init_cxlflash() - module entry point
3952 * Return: 0 on success, -errno on failure
3954 static int __init init_cxlflash(void)
3956 int rc;
3958 check_sizes();
3959 cxlflash_list_init();
3960 rc = cxlflash_class_init();
3961 if (unlikely(rc))
3962 goto out;
3964 rc = pci_register_driver(&cxlflash_driver);
3965 if (unlikely(rc))
3966 goto err;
3967 out:
3968 pr_debug("%s: returning rc=%d\n", __func__, rc);
3969 return rc;
3970 err:
3971 cxlflash_class_exit();
3972 goto out;
3976 * exit_cxlflash() - module exit point
3978 static void __exit exit_cxlflash(void)
3980 cxlflash_term_global_luns();
3981 cxlflash_free_errpage();
3983 pci_unregister_driver(&cxlflash_driver);
3984 cxlflash_class_exit();
3987 module_init(init_cxlflash);
3988 module_exit(exit_cxlflash);