/* drivers/scsi/cxlflash/main.c */
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
/**
 * cmd_checkout() - checks out an AFU command
 * @afu: AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
static struct afu_cmd *cmd_checkout(struct afu *afu)
{
	int k, dec = CXLFLASH_NUM_CMDS;
	struct afu_cmd *cmd;

	while (dec--) {
		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

		cmd = &afu->cmd[k];

		if (!atomic_dec_if_positive(&cmd->free)) {
			pr_devel("%s: returning found index=%d cmd=%p\n",
				 __func__, cmd->slot, cmd);
			memset(cmd->buf, 0, CMD_BUFSIZE);
			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
			return cmd;
		}
	}

	return NULL;
}
/**
 * cmd_checkin() - checks in an AFU command
 * @cmd: AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
static void cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use_b[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}
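
/*
 * For illustration, a typical pool round trip (hypothetical caller):
 *
 *	struct afu_cmd *cmd = cmd_checkout(afu);
 *	if (unlikely(!cmd))
 *		return SCSI_MLQUEUE_HOST_BUSY;	// pool exhausted
 *	// ... fill cmd->rcb, send via send_cmd(), wait ...
 *	cmd_checkin(cmd);			// return to the pool
 *
 * The pool is lock-free: checkout gates on atomic_dec_if_positive() of
 * cmd->free and checkin flips it back with atomic_inc_return(), so no
 * spinlock is required around pool membership.
 */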
/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not, then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;
		cmd_checkin(cmd); /* Don't use cmd after here */

		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
				     "ioasc=%d\n", __func__, scp, scp->result,
				     cmd->sa.ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}
/**
 * context_reset() - timeout handler for AFU commands
 * @cmd: AFU command that timed out.
 *
 * Sends a reset to the AFU.
 */
static void context_reset(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	u64 room = 0;
	struct afu *afu = cmd->parent;
	ulong lock_flags;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	spin_lock_irqsave(&cmd->slock, lock_flags);

	/* Already completed? */
	if (cmd->sa.host_use_b[0] & B_DONE) {
		spin_unlock_irqrestore(&cmd->slock, lock_flags);
		return;
	}

	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	/*
	 * We really want to send this reset at all costs, so spread
	 * out wait time on successive retries for available room.
	 */
	do {
		room = readq_be(&afu->host_map->cmd_room);
		atomic64_set(&afu->room, room);
		if (room)
			goto write_rrin;
		udelay(nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	pr_err("%s: no cmd_room to send reset\n", __func__);
	return;

write_rrin:
	nretry = 0;
	writeq_be(rrin, &afu->host_map->ioarrin);
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(2 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);
}
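
/*
 * Timing note for context_reset(): the room-poll loop backs off linearly
 * (udelay(nretry) grows 0, 1, 2, ... microseconds per retry), while the
 * IOARRIN-consumption loop backs off exponentially (udelay(2 << nretry)
 * yields 2, 4, 8, ... microseconds), both bounded by MC_ROOM_RETRY_CNT.
 */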
/**
 * send_cmd() - sends an AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int nretry = 0;
	int rc = 0;
	u64 room;
	long newval;

	/*
	 * This routine is used by critical users such as an AFU sync and to
	 * send a task management function (TMF). Thus we want to retry a
	 * bit before returning an error. To avoid the performance penalty
	 * of MMIO, we spread the update of 'room' over multiple commands.
	 */
retry:
	newval = atomic64_dec_if_positive(&afu->room);
	if (!newval) {
		do {
			room = readq_be(&afu->host_map->cmd_room);
			atomic64_set(&afu->room, room);
			if (room)
				goto write_ioarrin;
			udelay(nretry);
		} while (nretry++ < MC_ROOM_RETRY_CNT);

		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
			__func__, cmd->rcb.cdb[0]);

		goto no_room;
	} else if (unlikely(newval < 0)) {
		/* This should be rare. i.e. Only if two threads race and
		 * decrement before the MMIO read is done. In this case
		 * just benefit from the other thread having updated
		 * afu->room.
		 */
		if (nretry++ < MC_ROOM_RETRY_CNT) {
			udelay(nretry);
			goto retry;
		}

		goto no_room;
	}

write_ioarrin:
	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;

no_room:
	afu->read_room = true;
	schedule_work(&cfg->work_q);
	rc = SCSI_MLQUEUE_HOST_BUSY;
	goto out;
}
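
/*
 * Sketch of the 'room' cache above (hypothetical values, for illustration):
 * a single MMIO read of cmd_room can cover several submissions.
 *
 *	atomic64_set(&afu->room, 4);	// refreshed from MMIO
 *	// three sends decrement 4 -> 3 -> 2 -> 1 with no MMIO traffic
 *	// the send that drains the count (newval == 0) re-reads cmd_room
 *	// a racing thread that sees newval < 0 simply retries, benefiting
 *	// from the refreshed count
 */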
/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 */
static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		context_reset(cmd);

	if (unlikely(cmd->sa.ioasc != 0))
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
}
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu: AFU to checkout from.
 * @scp: SCSI command from stack.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd;

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}
/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}
/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd;
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	ncount = scsi_sg_count(scp);
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}
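
	/*
	 * Note: the RCB carries a single data_len/data_ea pair, so the loop
	 * above effectively records only the last scatter-gather element.
	 * This is safe assuming the host template elsewhere in this driver
	 * disables scatter-gather, making a one-element list the expected
	 * case.
	 */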
	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		scsi_dma_unmap(scp);
	}

out:
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}
/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	int i;
	char *buf = NULL;
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			buf = afu->cmd[i].buf;
			if (!((u64)buf & (PAGE_SIZE - 1)))
				free_page((ulong)buf);
		}

		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}
/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	int i;
	struct afu *afu = cfg->afu;

	if (likely(afu)) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
			complete(&afu->cmd[i].cevent);

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}
/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
			__func__);
		return;
	}

	switch (level) {
	case UNDO_START:
		rc = cxl_stop_context(cfg->mcctx);
		BUG_ON(rc);
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
	case RELEASE_CONTEXT:
		cfg->mcctx = NULL;
	}
}
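
/*
 * The switch in term_mc() is an intentional waterfall: each undo_level case
 * falls through to the next so teardown resumes from wherever init_mc()
 * stopped. For example, term_mc(cfg, UNMAP_TWO) unmaps IRQs 2 and 1, frees
 * the AFU IRQs, and then releases the context.
 */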
/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	term_mc(cfg, UNDO_START);

	if (cfg->afu)
		stop_afu(cfg);

	pr_debug("%s: returning\n", __func__);
}
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		cancel_work_sync(&cfg->work_q);
	case INIT_STATE_PCI:
		pci_release_regions(cfg->dev);
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}
/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	int i;
	char *buf = NULL;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;

	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
		if (!((u64)buf & (PAGE_SIZE - 1))) {
			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
			if (unlikely(!buf)) {
				dev_err(dev,
					"%s: Allocate command buffers fail!\n",
					__func__);
				rc = -ENOMEM;
				free_mem(cfg);
				goto out;
			}
		}

		cfg->afu->cmd[i].buf = buf;
		atomic_set(&cfg->afu->cmd[i].free, 1);
		cfg->afu->cmd[i].slot = i;
	}

out:
	return rc;
}
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
	rc = pci_request_regions(pdev, CXLFLASH_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"%s: Couldn't register memory range of registers\n",
			__func__);
		goto out;
	}

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out_release_regions;
		}
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc < 0) {
		dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
			__func__);
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (rc < 0) {
		dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
			__func__);
		goto out_disable;
	}

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		cxlflash_wait_for_pci_err_recovery(cfg);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
			__func__);
		rc = -EIO;
		goto cleanup_nolog;
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

cleanup_nolog:
out_msi_disable:
	cxlflash_wait_for_pci_err_recovery(cfg);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	goto out;
}
/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
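
/*
 * The worst-case wait for this routine (and its offline counterpart below)
 * is roughly delay_us * nretry microseconds: each poll sleeps for delay_us
 * and the loop runs up to nretry + 1 times before giving up and returning
 * FALSE.
 */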
/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn: The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return:
 *	0 when the WWPN is successfully written and the port comes back online
 *	-1 when the port fails to go offline or come back up online
 */
static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			u64 wwpn)
{
	int rc = 0;

	set_port_offline(fc_regs);

	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
		rc = -1; /* but continue on to leave the port back online */
	}

	if (rc == 0)
		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	/* Always return success after programming WWPN */
	rc = 0;

	set_port_online(fc_regs);

	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);

	return rc;
}
/**
 * afu_link_reset() - resets the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}
/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
	{0x0, "", 0, 0}		/* terminator */
};
/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status: Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}
/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu: AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}
/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}
/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process however many RRQ entries that are ready */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}
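
/*
 * RRQ toggle protocol, for illustration: the AFU stamps each response
 * entry with a generation bit matching the current 'toggle'. When the
 * queue wraps, the producer flips the bit, so a stale entry from the
 * previous pass fails the comparison and the loop stops:
 *
 *	if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
 *		break;	// entry not yet written during this pass
 *
 * start_afu() primes afu->toggle to 1 to match the AFU's first pass.
 */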
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */
			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}
/**
 * start_context() - starts the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg: Internal structure associated with the host.
 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->parent_dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
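
/*
 * Illustration of the conversion above, assuming WWPN_LEN is 16 (a 64-bit
 * WWPN is 16 hex characters, so the length constant doubles as the numeric
 * base) and a hypothetical WWPN value:
 *
 *	char tmp_buf[WWPN_BUF_LEN] = "500507680b000001";
 *	ulong val;
 *	rc = kstrtoul(tmp_buf, WWPN_LEN, &val);	// parses as base 16
 */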
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg: Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* Initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}
/**
 * init_global() - initialize AFU global registers
 * @cfg: Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);

	/* Set up RRQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0 &&
		    afu_set_wwpn(afu, i,
				 &afu->afu_map->global.fc_regs[i][0],
				 wwpn[i])) {
			dev_err(dev, "%s: failed to set WWPN on port %d\n",
				__func__, i);
			rc = -EIO;
			goto out;
		}
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);

out:
	return rc;
}
/**
 * start_afu() - initializes and starts the AFU
 * @cfg: Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	int i = 0;
	int rc = 0;

	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		cmd = &afu->cmd[i];

		init_completion(&cmd->cevent);
		spin_lock_init(&cmd->slock);
		cmd->parent = afu;
	}

	init_pcr(cfg);

	/* After an AFU reset, RRQ entries are stale, clear them */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));

	/* Initialize RRQ pointers */
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
	afu->toggle = 1;

	rc = init_global(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_mc() - create and register as the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx))
		return -ENOMEM;
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
			__func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
			__func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
			__func__);
		level = UNMAP_TWO;
		goto out;
	}

	rc = 0;

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_mc(cfg, level);
	goto ret;
}
/**
 * init_afu() - setup as master context and start AFU
 * @cfg: Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		pr_err("Back level AFU, please upgrade. AFU version %s "
		       "interface version 0x%llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err2;
	}

	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
		 afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
			__func__, rc);
		goto err2;
	}

	afu_err_intr_init(cfg->afu);
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

err2:
	cxl_psa_unmap((void __iomem *)afu->afu_map);
	afu->afu_map = NULL;
err1:
	term_mc(cfg, UNDO_START);
	goto out;
}
/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu: AFU associated with the host.
 * @ctx_hndl_u: Identifies context requesting sync.
 * @res_hndl_u: Identifies resource requesting sync.
 * @mode: Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires calling threads to not be in interrupt
 * context due to the possibility of sleeping during concurrent sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success
 *	-1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	int rc = 0;
	int retry_cnt = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
retry:
	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		retry_cnt++;
		udelay(1000 * retry_cnt);
		if (retry_cnt < MC_RETRY_CNT)
			goto retry;
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = -1;
		goto out;
	}

	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.port_sel = 0x0;	/* NA */
	cmd->rcb.lun_id = 0x0;	/* NA */
	cmd->rcb.data_len = 0x0;
	cmd->rcb.data_ea = 0x0;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	wait_resp(afu, cmd);

	/* Set on timeout */
	if (unlikely((cmd->sa.ioasc != 0) ||
		     (cmd->sa.host_use_b[0] & B_ERROR)))
		rc = -1;
out:
	mutex_unlock(&sync_active);
	if (cmd)
		cmd_checkin(cmd);
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
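
/*
 * Example usage from this file: afu_link_reset() issues a global sync after
 * steering traffic away from the port being reset, passing zeros for the
 * context and resource handles:
 *
 *	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
 *	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
 */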
/**
 * afu_reset() - resets the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	/* Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset is complete.
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp: SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp: SCSI command from stack identifying host.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{

	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
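
/*
 * Usage note (editorial, not from the original source): wiring this hook
 * into the host template below makes the depth tunable per device through
 * standard sysfs (path is illustrative); values above
 * CXLFLASH_MAX_CMDS_PER_LUN are silently capped:
 *
 *	echo 8 > /sys/block/sdX/device/queue_depth
 */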
/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @afu:	AFU owning the specified port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
{
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_regs;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_regs = &afu->afu_map->global.fc_regs[port][0];
	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}
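
/*
 * Usage note (editorial, not from the original source): once the port0/
 * port1 attributes are registered below, the link state can be read from
 * sysfs (host number is illustrative):
 *
 *	$ cat /sys/class/scsi_host/host3/port0
 *	online
 */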
/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(0, afu, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(1, afu, buf);
}
/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}
/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf (i.e. @count).
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;
		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
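
/*
 * Usage note (editorial, not from the original source): selecting a dummy
 * LUN mode from user space (host number is illustrative):
 *
 *	echo 1 > /sys/class/scsi_host/host3/lun_mode
 *
 * A valid new mode triggers an AFU reset followed by a host rescan, so
 * external LUNs disappear while an internal mode is selected.
 */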
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}

/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for status reporting.
 * @afu:	AFU owning the specified port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct afu *afu,
					    char *buf)
{
	int i;
	ssize_t bytes = 0;
	__be64 __iomem *fc_port;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_port = &afu->afu_map->global.fc_port[port][0];

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
	return bytes;
}
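
/*
 * Usage note (editorial, not from the original source): each line of the
 * report is an index/entry pair in the "%03d: %016llX" format above, so a
 * dump looks roughly like (values are illustrative):
 *
 *	$ cat /sys/class/scsi_host/host3/port0_lun_table
 *	000: 4000400000000000
 *	001: 0000000000000000
 */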
/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(0, afu, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(1, afu, buf);
}
/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}
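
/*
 * Usage note (editorial, not from the original source): this per-device
 * attribute appears under the SCSI device node (H:C:T:L is illustrative):
 *
 *	$ cat /sys/class/scsi_device/3:0:0:0/device/mode
 *	legacy
 *
 * "superpipe" is reported once a user context has been attached to the
 * LUN (sdev->hostdata is then non-NULL).
 */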
/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};
/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = CXLFLASH_MAX_CMDS,
	.this_id = -1,
	.sg_tablesize = SG_NONE,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};
/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Read AFU command room
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */

	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0) {
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		} else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			afu_link_reset(afu, port,
				       &afu->afu_map->global.fc_regs[port][0]);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	if (afu->read_room) {
		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
		afu->read_room = false;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}
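
/*
 * Editorial note: this handler is assumed to be kicked with
 * schedule_work(&cfg->work_q) from interrupt paths after the driver has
 * recorded what needs doing (lr_state/lr_port, read_room,
 * scan_host_needed), keeping blocking MMIO and the host rescan out of
 * interrupt context.
 */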
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *phys_dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
			__func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = NUM_FC_PORTS - 1;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = (struct cxlflash_cfg *)host->hostdata;
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
			__func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * The promoted LUNs move to the top of the LUN table. The rest stay
	 * on the bottom half. The bottom half grows from the end
	 * (index = 255), whereas the top half grows from the beginning
	 * (index = 0).
	 */
	cfg->promote_lun_index = 0;
	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	/*
	 * Use the special service provided to look up the physical
	 * PCI device, since we are called on the probe of the virtual
	 * PCI host bus (vphb)
	 */
	phys_dev = cxl_get_phys_dev(pdev);
	if (!dev_is_pci(phys_dev)) {
		dev_err(&pdev->dev, "%s: not a pci dev\n", __func__);
		rc = -ENODEV;
		goto out_remove;
	}
	cfg->parent_dev = to_pci_dev(phys_dev);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_pci "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_afu "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_scsi "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}
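
/*
 * Editorial note: cfg->init_state advances NONE -> PCI -> AFU -> SCSI as
 * each probe stage succeeds. On failure, out_remove funnels into
 * cxlflash_remove(), which is assumed to unwind only the stages that the
 * recorded state marks as complete.
 */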
/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}
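
/*
 * Editorial sketch (assumptions flagged): the ioctl entry points are
 * assumed to bracket their work with the read side of this semaphore,
 * for example:
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = process_ioctl(cfg, cmd, arg);	(hypothetical helper)
 *	up_read(&cfg->ioctl_rwsem);
 *
 * Because readers exclude the writer, the empty down_write()/up_write()
 * pair above cannot return until every in-flight ioctl has dropped its
 * read hold, which is the entire "drain".
 */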
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
				__func__, rc);
		term_mc(cfg, UNDO_START);
		stop_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}
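
/*
 * Editorial note: during EEH recovery the PCI core invokes these hooks in
 * order - error_detected() freezes the driver and tears down the AFU
 * context, slot_reset() rebuilds it via init_afu(), and resume() returns
 * the host to STATE_NORMAL, releasing blocked requests and reset waiters.
 */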
static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};

/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};
/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	pr_info("%s: IBM Power CXL Flash Adapter: %s\n",
		__func__, CXLFLASH_DRIVER_DATE);

	cxlflash_list_init();

	return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);