// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <linux/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
static const struct class cxlflash_class = {
	.name = "cxlflash",
	.devnode = cxlflash_devnode,
};

static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioasa = &(cmd->sa);
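
	/* An underrun is not fatal; record the residual so that the midlayer
	 * can account for the short transfer.
	 */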
	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error else where.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ERROR << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
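
	/* Remove the command from the pending list under the send queue lock
	 * so that it cannot also be flushed by flush_pending_cmds().
	 */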
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else
		complete(&cmd->cevent);
}
/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);
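
		/* SCSI commands are bounced back to the midlayer for a retry;
		 * internal commands are marked aborted and their waiters are
		 * woken up.
		 */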
		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else
				complete(&cmd->cevent);
		}
	}
}
/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
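	/* Poll the reset register until the AFU clears the reset bit,
	 * doubling the delay on each iteration.
	 */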
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}
/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}
/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}
/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	list_add(&cmd->list, &hwq->pending_cmds);
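	/* Writing the effective address of the RCB to IOARRIN hands the
	 * command over to the AFU.
	 */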
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
			    __func__, cmd, cmd->rcb.data_len,
			    cmd->rcb.data_ea, rc);
	return rc;
}
/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;
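
	/* Copy the RCB into the current SQ slot and advance (or wrap) the
	 * tail pointer under the send queue lock.
	 */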
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
	       "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
	       cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
	       readq_be(&hwq->host_map->sq_head),
	       readq_be(&hwq->host_map->sq_tail));
	return rc;
}
/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
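	/* Allow twice the command timeout before giving up on the
	 * completion.
	 */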
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}
/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;
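
	/* Steer the command based on the configured queueing mode:
	 * round-robin, block layer tag, or submitting CPU.
	 */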
	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:	Internal structure associated with the host.
 * @sdev:	SCSI device destined for TMF.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	bool needs_deletion = false;
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}
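
	/* Wait up to 5 seconds for the TMF to complete; completion clears
	 * tmf_active and wakes this thread.
	 */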
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
		needs_deletion = true;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	if (needs_deletion) {
		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	}
out:
	kfree(buf);
	return rc;
}
/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}
/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
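
	/* Commands are only accepted in the normal state; a reset reports
	 * busy to trigger a retry and a failed device completes the command
	 * with DID_NO_CONNECT.
	 */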
	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scsi_done(scp);
		goto out;
	default:
		atomic_inc(&afu->cmds_active);
		break;
	}

	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	atomic_dec(&afu->cmds_active);
out:
	return rc;
}
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}
/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}
/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}
/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}
/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}
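
	/* Tear down falls through from the requested level so that every
	 * resource allocated after that point is undone.
	 */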
	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		fallthrough;
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		fallthrough;
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		fallthrough;
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		fallthrough;
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
	hwq->hrrq_online = false;
	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}
/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}
/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:	Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}
/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* Yield to running recovery threads before continuing with remove */
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
				     cfg->state != STATE_PROBING);
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);
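
	/* Unwind in the reverse order of initialization; each case falls
	 * through to the next.
	 */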
	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		fallthrough;
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		fallthrough;
	case INIT_STATE_AFU:
		term_afu(cfg);
		fallthrough;
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
		fallthrough;
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * wait_port_online() - waits for the specified host FC port come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
/**
 * wait_port_offline() - waits for the specified host FC port go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}
/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place. A failure
 * to come online is overridden.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}
/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		reg = readq_be(&hwq->host_map->ctx_ctrl);
		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
		reg |= SISL_MSI_SYNC_ERROR;
		writeq_be(reg, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}
/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}
/**
 * process_hrrq() - process the read-response queue
 * @hwq:	HWQ associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
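	/* An RRQ entry is owned by the host only while its toggle bit
	 * matches the toggle tracked in the hwq; the bit flips on every
	 * wrap of the queue.
	 */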
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}
/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}
/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}
/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	/* Silently drop spurious interrupts when queue is not online */
	if (!hwq->hrrq_online) {
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}
/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 *	- Order matters here as this array is indexed by bit position.
 *
 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *	  as complex and complains due to a lack of parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
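
/* The seemingly odd port order (1, 0, 3, 2) follows the bit assignments
 * of the SISL asynchronous status register.
 */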
static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg_unmasked;
	u64 reg;
	u64 bit;
	u8 port;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (unlikely(reg_unmasked == 0)) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
			WARN_ON_ONCE(1);
			continue;
		}

		info = &ainfo[bit];
		if (unlikely(info->status != 1ULL << bit)) {
			WARN_ON_ONCE(1);
			continue;
		}

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}
/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int i, k, rc = 0;
	unsigned int kw_size;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
						cfg->dev_id->driver_data;
	const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
	const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 *
	 * Allow for WWPN not being found for all devices, setting
	 * the returned WWPN to zero when not found. Notify with a
	 * log error for cards that should have had WWPN keywords
	 * in the VPD - cards requiring WWPN will not have their
	 * ports programmed and operate in an undefined state.
	 */
	for (k = 0; k < cfg->num_fc_ports; k++) {
		i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
						 wwpn_vpd_tags[k], &kw_size);
		if (i == -ENOENT) {
			if (wwpn_vpd_required)
				dev_err(dev, "%s: Port %d WWPN not found\n",
					__func__, k);
			wwpn[k] = 0;
			continue;
		}

		if (i < 0 || kw_size != WWPN_LEN) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	struct hwq *hwq;
	void *cookie;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into hwq */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		cookie = hwq->ctx_cookie;

		hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

		/* Program the Endian Control for the master context */
		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
	}
}
/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	struct sisl_host_map __iomem *hmap;
	__be64 __iomem *fc_port_regs;
	void *ctx;
	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	int j;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	/* Set up RRQ and SQ in HWQ for master issued cmds */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		hmap = hwq->host_map;

		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
		hwq->hrrq_online = true;

		if (afu_is_sq_cmd_mode(afu)) {
			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
		}
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = 0;
	} else {
		writeq_be(PORT_MASK(cfg->num_fc_ports),
			  &afu->afu_map->global.regs.afu_port_sel);
		num_ports = cfg->num_fc_ports;
	}

	for (i = 0; i < num_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Set up the LISN effective address for each master */
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);
			ctx = hwq->ctx_cookie;

			for (j = 0; j < hwq->num_irqs; j++) {
				reg = cfg->ops->get_irq_objhndl(ctx, j);
				writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
			}

			reg = hwq->ctx_hndl;
			writeq_be(SISL_LISN_PASID(reg, reg),
				  &hwq->ctrl_map->lisn_pasid[0]);
			writeq_be(SISL_LISN_PASID(0UL, reg),
				  &hwq->ctrl_map->lisn_pasid[1]);
		}
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
			   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
			   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
			  &hwq->ctrl_map->ctx_cap);
	}

	/*
	 * Determine write-same unmap support for host by evaluating the unmap
	 * sector support bit of the context control register associated with
	 * the primary hardware queue. Note that while this status is reflected
	 * in a context register, the outcome can be assumed to be host-wide.
	 */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	reg = readq_be(&hwq->host_map->ctx_ctrl);
	if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
		cfg->ws_unmap = true;

	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}
/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int rc = 0;
	int i;

	init_pcr(cfg);

	/* Initialize each HWQ */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		/* After an AFU reset, RRQ entries are stale, clear them */
		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));

		/* Initialize RRQ pointers */
		hwq->hrrq_start = &hwq->rrq_entry[0];
		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
		hwq->hrrq_curr = hwq->hrrq_start;
		hwq->toggle = 1;

		/* Initialize spin locks */
		spin_lock_init(&hwq->hrrq_slock);
		spin_lock_init(&hwq->hsq_slock);

		/* Initialize SQ */
		if (afu_is_sq_cmd_mode(afu)) {
			memset(&hwq->sq, 0, sizeof(hwq->sq));
			hwq->hsq_start = &hwq->sq[0];
			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
			hwq->hsq_curr = hwq->hsq_start;

			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
		}

		/* Initialize IRQ poll */
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
				      cxlflash_irqpoll);
	}

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @hwq:	Hardware queue to initialize.
 *
 * Return: UNDO_NOOP on success, undo level for the caller to unwind on failure
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct hwq *hwq)
{
	struct device *dev = &cfg->dev->dev;
	void *ctx = hwq->ctx_cookie;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;
	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
	int num_irqs = hwq->num_irqs;

	rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
				   "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
				   "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
	if (!is_primary_hwq)
		goto out;

	rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
				   "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}
/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	HWQ Index of the master context.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
	void *ctx;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(cfg->afu, index);
	int rc = 0;
	int num_irqs;
	enum undo_level level;

	hwq->afu = cfg->afu;
	hwq->index = index;
	INIT_LIST_HEAD(&hwq->pending_cmds);
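
	/* The primary HWQ uses the device's default context and also carries
	 * the async error interrupt, so it gets three interrupts; secondary
	 * HWQs get a fresh context with two.
	 */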
	if (index == PRIMARY_HWQ) {
		ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
		num_irqs = 3;
	} else {
		ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
		num_irqs = 2;
	}
	if (IS_ERR_OR_NULL(ctx)) {
		rc = -ENOMEM;
		goto err1;
	}

	WARN_ON(hwq->ctx_cookie);
	hwq->ctx_cookie = ctx;
	hwq->num_irqs = num_irqs;

	/* Set it up as a master with the CXL */
	cfg->ops->set_master(ctx);

	/* Reset AFU when initializing primary context */
	if (index == PRIMARY_HWQ) {
		rc = cfg->ops->afu_reset(ctx);
		if (unlikely(rc)) {
			dev_err(dev, "%s: AFU reset failed rc=%d\n",
				__func__, rc);
			goto err1;
		}
	}

	level = init_intr(cfg, hwq);
	if (unlikely(level)) {
		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
		goto err2;
	}

	/* Finally, activate the context by starting it */
	rc = cfg->ops->start_context(hwq->ctx_cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto err2;
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	term_intr(cfg, level, index);
	if (index != PRIMARY_HWQ)
		cfg->ops->release_context(ctx);
err1:
	hwq->ctx_cookie = NULL;
	goto out;
}
/**
 * get_num_afu_ports() - determines and configures the number of AFU ports
 * @cfg:	Internal structure associated with the host.
 *
 * This routine determines the number of AFU ports by converting the global
 * port selection mask. The converted value is only valid following an AFU
 * reset (explicit or power-on). This routine must be invoked shortly after
 * mapping as other routines are dependent on the number of ports during the
 * initialization sequence.
 *
 * To support legacy AFUs that might not have reflected an initial global
 * port mask (value read is 0), default to the number of ports originally
 * supported by the cxlflash driver (2) before hardware with other port
 * offerings was introduced.
 */
static void get_num_afu_ports(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 port_mask;
	int num_fc_ports = LEGACY_FC_PORTS;
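
	/* The index of the highest bit set in the port select mask yields
	 * the number of ports.
	 */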
	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	if (port_mask != 0ULL)
		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);

	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
		__func__, port_mask, num_fc_ports);

	cfg->num_fc_ports = num_fc_ports;
	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
}
/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int i;

	cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);

	mutex_init(&afu->sync_active);
	afu->num_hwqs = afu->desired_hwqs;
	for (i = 0; i < afu->num_hwqs; i++) {
		rc = init_mc(cfg, i);
		if (rc) {
			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
				__func__, rc, i);
			goto err1;
		}
	}

	/* Map the entire MMIO space of the AFU using the first context */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
	if (!afu->afu_map) {
		dev_err(dev, "%s: psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
			"interface version %016llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err1;
	}

	if (afu_is_sq_cmd_mode(afu)) {
		afu->send_cmd = send_cmd_sq;
		afu->context_reset = context_reset_sq;
	} else {
		afu->send_cmd = send_cmd_ioarrin;
		afu->context_reset = context_reset_ioarrin;
	}

	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
		afu->version, afu->interface_version);

	get_num_afu_ports(cfg);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		hwq->room = readq_be(&hwq->host_map->cmd_room);
	}

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err1:
	for (i = afu->num_hwqs - 1; i >= 0; i--) {
		term_intr(cfg, UNMAP_THREE, i);
		term_mc(cfg, i);
	}
	goto out;
}
);
/**
 * afu_reset() - resets the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	/*
	 * Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset is complete.
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}
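
/*
 * For reference, the reader side of this drain idiom (a sketch; the actual
 * ioctl paths live elsewhere in this driver) brackets each ioctl thread:
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = do_ioctl(cfg, buf);
 *	up_read(&cfg->ioctl_rwsem);
 *
 * Once every reader has released the semaphore, the writer in drain_ioctls()
 * acquires it, guaranteeing no ioctl is mid-flight at that moment.
 */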
/**
 * cxlflash_async_reset_host() - asynchronous host reset handler
 * @data:	Private data provided while scheduling reset.
 * @cookie:	Cookie that can be used for checkpointing.
 */
static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
{
	struct cxlflash_cfg *cfg = data;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	if (cfg->state != STATE_RESET) {
		dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	drain_ioctls(cfg);
	cxlflash_mark_contexts_error(cfg);
	rc = afu_reset(cfg);
	if (rc)
		cfg->state = STATE_FAILTERM;
	else
		cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);

out:
	scsi_unblock_requests(cfg->host);
}
/**
 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Not performing reset state=%d\n",
			__func__, cfg->state);
		return;
	}

	cfg->state = STATE_RESET;
	scsi_block_requests(cfg->host);
	cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
						 cfg);
}
/**
 * send_afu_cmd() - builds and sends an internal AFU command
 * @afu:	AFU associated with the host.
 * @rcb:	Pre-populated IOARCB describing command to send.
 *
 * The AFU can only take one internal AFU command at a time. This limitation is
 * enforced by using a mutex to provide exclusive access to the AFU during the
 * operation. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent AFU operations.
 *
 * The command status is optionally passed back to the caller when the caller
 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
 *
 * Return:
 *	0 on success, -errno on failure
 */
static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	ulong lock_flags;
	char *buf = NULL;
	int rc = 0;
	int nretry = 0;

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Sync not required state=%u\n",
			__func__, cfg->state);
		return 0;
	}

	mutex_lock(&afu->sync_active);
	atomic_inc(&afu->cmds_active);
	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));

retry:
	memset(cmd, 0, sizeof(*cmd));
	memcpy(&cmd->rcb, rcb, sizeof(*rcb));
	INIT_LIST_HEAD(&cmd->queue);
	init_completion(&cmd->cevent);
	cmd->parent = afu;
	cmd->hwq_index = hwq->index;
	cmd->rcb.ctx_id = hwq->ctx_hndl;

	dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
		__func__, afu, cmd, cmd->rcb.cdb[0], nretry);

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		rc = -ENOBUFS;
		goto out;
	}

	rc = wait_resp(afu, cmd);
	switch (rc) {
	case -ETIMEDOUT:
		rc = afu->context_reset(hwq);
		if (rc) {
			/* Delete the command from pending_cmds list */
			spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
			list_del(&cmd->list);
			spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

			cxlflash_schedule_async_reset(cfg);
			break;
		}
		fallthrough;	/* to retry */
	case -EAGAIN:
		if (++nretry < 2)
			goto retry;
		fallthrough;	/* to exit */
	default:
		break;
	}

	if (rcb->ioasa)
		*rcb->ioasa = cmd->sa;
out:
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&afu->sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
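
/*
 * A note on the allocation above (illustrative arithmetic, not driver code):
 * kmalloc() is given sizeof(*cmd) + __alignof__(*cmd) - 1 bytes so that a
 * suitably aligned struct afu_cmd is guaranteed to fit somewhere inside the
 * buffer. For example, with a 128-byte alignment requirement and a buffer
 * that happens to start at an address ending in 0x10, PTR_ALIGN() rounds up
 * to the next 128-byte boundary (address ending in 0x80), and the extra
 * 127 bytes of slack ensure sizeof(*cmd) bytes still remain before the end
 * of the allocation.
 */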
/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx:	Identifies context requesting sync.
 * @res:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success, -errno on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb = { 0 };

	dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
		__func__, afu, ctx, res, mode);

	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	rcb.cdb[0] = SISL_AFU_CMD_SYNC;
	rcb.cdb[1] = mode;
	put_unaligned_be16(ctx, &rcb.cdb[2]);
	put_unaligned_be32(res, &rcb.cdb[4]);

	return send_afu_cmd(afu, &rcb);
}
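
/*
 * Example caller (a sketch; the real callers live in the superpipe code):
 * a context releasing a resource handle might issue a lightweight sync as
 *
 *	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
 *
 * where AFU_LW_SYNC selects the lightweight variant of the sync modes
 * described above.
 */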
/**
 * cxlflash_eh_abort_handler() - abort a SCSI command
 * @scp:	SCSI command to abort.
 *
 * CXL Flash devices do not support a single command abort. Reset the context
 * as per SISLite specification. Flush any pending commands in the hardware
 * queue before the reset.
 *
 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
{
	int rc = FAILED;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afuc(scp);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * When the state is not normal, another reset/reload is in progress.
	 * Return failed and the mid-layer will invoke host reset handler.
	 */
	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	rc = afu->context_reset(hwq);
	if (unlikely(rc))
		goto out;

	rc = SUCCESS;

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct scsi_device *sdev = scp->device;
	struct Scsi_Host *host = sdev->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;
	int rcr = 0;

	dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
		host->host_no, sdev->channel, sdev->id, sdev->lun);
retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: %d\n", __func__, host->host_no);

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		fallthrough;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_status(u32 port,
					 struct cxlflash_cfg *cfg,
					 char *buf)
{
	struct device *dev = &cfg->dev->dev;
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_port_regs;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_regs = get_fc_port_regs(cfg, port);
	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}
/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(0, cfg, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(1, cfg, buf);
}

/**
 * port2_show() - queries and presents the current status of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port2_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(2, cfg, buf);
}

/**
 * port3_show() - queries and presents the current status of port 3
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port3_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(3, cfg, buf);
}
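
/*
 * These attributes surface as sysfs files under the SCSI host, e.g.
 * /sys/class/scsi_host/host<N>/port0 (path shown for illustration), and
 * reading one returns "online", "offline" or "unknown" for that port.
 */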
/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}
/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf.
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be one less than the
		 * number of fc ports for this card.
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
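
/*
 * Example (illustrative): switching the host into the single internal LUN
 * mode with 512B blocks from userspace would look like
 *
 *	echo 1 > /sys/class/scsi_host/host<N>/lun_mode
 *
 * which triggers an AFU reset and a rescan so the dummy LUN appears.
 */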
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t bytes = 0;

	bytes = scnprintf(buf, PAGE_SIZE,
			  "disk: %u\n", DK_CXLFLASH_VERSION_0);
	bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
			   "host: %u\n", HT_CXLFLASH_VERSION_0);

	return bytes;
}
/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct cxlflash_cfg *cfg,
					    char *buf)
{
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;
	int i;
	ssize_t bytes = 0;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_luns = get_fc_port_luns(cfg, port);

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n",
				   i, readq_be(&fc_port_luns[i]));
	return bytes;
}
/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(0, cfg, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(1, cfg, buf);
}

/**
 * port2_lun_table_show() - presents the current LUN table of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port2_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(2, cfg, buf);
}

/**
 * port3_lun_table_show() - presents the current LUN table of port 3
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port3_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(3, cfg, buf);
}
/**
 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
 *		weight in ASCII.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t irqpoll_weight_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
}
/**
 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
 *		weight in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The number of bytes consumed from @buf, or -errno on failure.
 */
static ssize_t irqpoll_weight_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	u32 weight;
	int rc, i;

	rc = kstrtouint(buf, 10, &weight);
	if (rc)
		return -EINVAL;

	if (weight > 256) {
		dev_info(cfgdev,
			 "Invalid IRQ poll weight. It must be 256 or less.\n");
		return -EINVAL;
	}

	if (weight == afu->irqpoll_weight) {
		dev_info(cfgdev,
			 "Current IRQ poll weight already has that value.\n");
		return -EINVAL;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_disable(&hwq->irqpoll);
		}
	}

	afu->irqpoll_weight = weight;

	if (weight > 0) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
		}
	}

	return count;
}
/**
 * num_hwqs_show() - presents the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
 *		queues in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t num_hwqs_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
}
/**
 * num_hwqs_store() - sets the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
 *		queues in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * n > 0: num_hwqs = n
 * n = 0: num_hwqs = num_online_cpus()
 * n < 0: num_hwqs = num_online_cpus() / abs(n)
 *
 * Return: The number of bytes consumed from @buf, or -errno on failure.
 */
static ssize_t num_hwqs_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;
	int rc;
	int nhwqs, num_hwqs;

	rc = kstrtoint(buf, 10, &nhwqs);
	if (rc)
		return -EINVAL;

	if (nhwqs >= 1)
		num_hwqs = nhwqs;
	else if (nhwqs == 0)
		num_hwqs = num_online_cpus();
	else
		num_hwqs = num_online_cpus() / abs(nhwqs);

	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
	WARN_ON_ONCE(afu->desired_hwqs == 0);

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rc = afu_reset(cfg);
		if (rc)
			cfg->state = STATE_FAILTERM;
		else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			goto retry;
		fallthrough;
	default:
		/* Ideally should not happen */
		dev_err(dev, "%s: Device is not ready, state=%d\n",
			__func__, cfg->state);
		break;
	}

	return count;
}
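
/*
 * Worked example of the mapping above (illustrative): on a host with 16
 * online CPUs, writing "4" requests 4 hardware queues, writing "0" requests
 * 16 (one per CPU), and writing "-2" requests 16 / 2 = 8; in every case the
 * result is clamped to CXLFLASH_MAX_HWQS.
 */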
static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
/**
 * hwq_mode_show() - presents the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
 *		as a character string.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t hwq_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
}
/**
 * hwq_mode_store() - sets the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
 *		as a character string.
 * @count:	Length of data residing in @buf.
 *
 * rr = Round-Robin
 * tag = Block MQ Tagging
 * cpu = CPU Affinity
 *
 * Return: The number of bytes consumed from @buf, or -errno on failure.
 */
static ssize_t hwq_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int i;
	u32 mode = MAX_HWQ_MODE;

	for (i = 0; i < MAX_HWQ_MODE; i++) {
		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
			mode = i;
			break;
		}
	}

	if (mode >= MAX_HWQ_MODE) {
		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
		return -EINVAL;
	}

	afu->hwq_mode = mode;

	return count;
}
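
/*
 * Example (illustrative): selecting the tagging-based steering policy from
 * userspace would look like
 *
 *	echo tag > /sys/class/scsi_host/host<N>/hwq_mode
 *
 * after which a read of hwq_mode reports "tag".
 */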
/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}
/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RO(port2);
static DEVICE_ATTR_RO(port3);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
static DEVICE_ATTR_RO(port2_lun_table);
static DEVICE_ATTR_RO(port3_lun_table);
static DEVICE_ATTR_RW(irqpoll_weight);
static DEVICE_ATTR_RW(num_hwqs);
static DEVICE_ATTR_RW(hwq_mode);

static struct attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0.attr,
	&dev_attr_port1.attr,
	&dev_attr_port2.attr,
	&dev_attr_port3.attr,
	&dev_attr_lun_mode.attr,
	&dev_attr_ioctl_version.attr,
	&dev_attr_port0_lun_table.attr,
	&dev_attr_port1_lun_table.attr,
	&dev_attr_port2_lun_table.attr,
	&dev_attr_port3_lun_table.attr,
	&dev_attr_irqpoll_weight.attr,
	&dev_attr_num_hwqs.attr,
	&dev_attr_hwq_mode.attr,
	NULL
};

ATTRIBUTE_GROUPS(cxlflash_host);

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode.attr,
	NULL
};

ATTRIBUTE_GROUPS(cxlflash_dev);
/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_abort_handler = cxlflash_eh_abort_handler,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.shost_groups = cxlflash_host_groups,
	.sdev_groups = cxlflash_dev_groups,
};
/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_WWPN_VPD_REQUIRED };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					(CXLFLASH_NOTIFY_SHUTDOWN |
					CXLFLASH_OCXL_DEV) };
/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_regs;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */

	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			fc_port_regs = get_fc_port_regs(cfg, port);
			afu_link_reset(afu, port, fc_port_regs);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}
/**
 * cxlflash_chr_open() - character device open handler
 * @inode:	Device inode associated with this character device.
 * @file:	File pointer for this device.
 *
 * Only users with admin privileges are allowed to open the character device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_chr_open(struct inode *inode, struct file *file)
{
	struct cxlflash_cfg *cfg;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
	file->private_data = cfg;

	return 0;
}
/**
 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
 * @cmd:	The host ioctl command to decode.
 *
 * Return: A string identifying the decoded host ioctl.
 */
static char *decode_hioctl(unsigned int cmd)
{
	switch (cmd) {
	case HT_CXLFLASH_LUN_PROVISION:
		return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
	}

	return "UNKNOWN";
}
/**
 * cxlflash_lun_provision() - host LUN provisioning handler
 * @cfg:	Internal structure associated with the host.
 * @arg:	Kernel copy of userspace ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, void *arg)
{
	struct ht_cxlflash_lun_provision *lunprov = arg;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb;
	struct sisl_ioasa asa;
	__be64 __iomem *fc_port_regs;
	u16 port = lunprov->port;
	u16 scmd = lunprov->hdr.subcmd;
	u16 type;
	u64 reg;
	u64 size;
	u64 lun_id;
	int rc = 0;

	if (!afu_is_lun_provision(afu)) {
		rc = -ENOTSUPP;
		goto out;
	}

	if (port >= cfg->num_fc_ports) {
		rc = -EINVAL;
		goto out;
	}

	switch (scmd) {
	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
		type = SISL_AFU_LUN_PROVISION_CREATE;
		size = lunprov->size;
		lun_id = 0;
		break;
	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
		type = SISL_AFU_LUN_PROVISION_DELETE;
		size = 0;
		lun_id = lunprov->lun_id;
		break;
	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
		fc_port_regs = get_fc_port_regs(cfg, port);

		reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
		lunprov->max_num_luns = reg;
		reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
		lunprov->cur_num_luns = reg;
		reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
		lunprov->max_cap_port = reg;
		reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
		lunprov->cur_cap_port = reg;

		goto out;
	default:
		rc = -EINVAL;
		goto out;
	}

	memset(&rcb, 0, sizeof(rcb));
	memset(&asa, 0, sizeof(asa));
	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	rcb.lun_id = lun_id;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_LUN_PROV_TIMEOUT;
	rcb.ioasa = &asa;

	rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
	rcb.cdb[1] = type;
	rcb.cdb[2] = port;
	put_unaligned_be64(size, &rcb.cdb[8]);

	rc = send_afu_cmd(afu, &rcb);
	if (rc) {
		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
			__func__, rc, asa.ioasc, asa.afu_extra);
		goto out;
	}

	if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
		lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
		memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
	}
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
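
/*
 * Userspace sketch (illustrative, not part of the driver): querying port 0
 * through the character device would look roughly like
 *
 *	struct ht_cxlflash_lun_provision lp = { 0 };
 *
 *	lp.hdr.version = HT_CXLFLASH_VERSION_0;
 *	lp.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT;
 *	lp.port = 0;
 *	rc = ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp);
 *
 * with fd an open descriptor on the /dev/cxlflash/cxlflash<N> node created
 * by init_chrdev() below.
 */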
/**
 * cxlflash_afu_debug() - host AFU debug handler
 * @cfg:	Internal structure associated with the host.
 * @arg:	Kernel copy of userspace ioctl data structure.
 *
 * For debug requests requiring a data buffer, always provide an aligned
 * (cache line) buffer to the AFU to appease any alignment requirements.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, void *arg)
{
	struct ht_cxlflash_afu_debug *afu_dbg = arg;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb;
	struct sisl_ioasa asa;
	char *buf = NULL;
	char *kbuf = NULL;
	void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
	u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
	u32 ulen = afu_dbg->data_len;
	bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
	int rc = 0;

	if (!afu_is_afu_debug(afu)) {
		rc = -ENOTSUPP;
		goto out;
	}

	if (ulen) {
		req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;

		if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
			rc = -EINVAL;
			goto out;
		}

		buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
		if (unlikely(!buf)) {
			rc = -ENOMEM;
			goto out;
		}

		kbuf = PTR_ALIGN(buf, cache_line_size());

		if (is_write) {
			req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

			if (copy_from_user(kbuf, ubuf, ulen)) {
				rc = -EFAULT;
				goto out;
			}
		}
	}

	memset(&rcb, 0, sizeof(rcb));
	memset(&asa, 0, sizeof(asa));

	rcb.req_flags = req_flags;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
	rcb.ioasa = &asa;

	if (ulen) {
		rcb.data_len = ulen;
		rcb.data_ea = (uintptr_t)kbuf;
	}

	rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
	memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
	       HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);

	rc = send_afu_cmd(afu, &rcb);
	if (rc) {
		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
			__func__, rc, asa.ioasc, asa.afu_extra);
		goto out;
	}

	if (ulen && !is_write) {
		if (copy_to_user(ubuf, kbuf, ulen))
			rc = -EFAULT;
	}
out:
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
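
/*
 * Note on data direction above (summarizing the code, not adding behavior):
 * HT_CXLFLASH_HOST_WRITE in the header flags selects host-to-AFU transfers,
 * so user data is copied in before the command is sent; otherwise the
 * transfer is AFU-to-host and the bounce buffer is copied out afterwards.
 * The bounce buffer is over-allocated by cache_line_size() - 1 bytes and
 * aligned with PTR_ALIGN(), mirroring the afu_cmd allocation idiom used in
 * send_afu_cmd().
 */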
/**
 * cxlflash_chr_ioctl() - character device IOCTL handler
 * @file:	File pointer for this device.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */
static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	typedef int (*hioctl) (struct cxlflash_cfg *, void *);

	struct cxlflash_cfg *cfg = file->private_data;
	struct device *dev = &cfg->dev->dev;
	char buf[sizeof(union cxlflash_ht_ioctls)];
	void __user *uarg = (void __user *)arg;
	struct ht_cxlflash_hdr *hdr;
	size_t size = 0;
	bool known_ioctl = false;
	int idx = 0;
	int rc = 0;
	hioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		hioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{ sizeof(struct ht_cxlflash_lun_provision), cxlflash_lun_provision },
	{ sizeof(struct ht_cxlflash_afu_debug), cxlflash_afu_debug },
	};

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
		__func__, cmd, idx, sizeof(ioctl_tbl));

	switch (cmd) {
	case HT_CXLFLASH_LUN_PROVISION:
	case HT_CXLFLASH_AFU_DEBUG:
		known_ioctl = true;
		idx = _IOC_NR(HT_CXLFLASH_LUN_PROVISION) - _IOC_NR(cmd);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		fallthrough;
	default:
		rc = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(&buf, uarg, size))) {
		dev_err(dev, "%s: copy_from_user() fail "
			"size=%lu cmd=%d (%s) uarg=%p\n",
			__func__, size, cmd, decode_hioctl(cmd), uarg);
		rc = -EFAULT;
		goto out;
	}

	hdr = (struct ht_cxlflash_hdr *)&buf;
	if (hdr->version != HT_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_hioctl(cmd));
		rc = -EINVAL;
		goto out;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = do_ioctl(cfg, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(uarg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail "
				"size=%lu cmd=%d (%s) uarg=%p\n",
				__func__, size, cmd, decode_hioctl(cmd), uarg);
			rc = -EFAULT;
		}

	/* fall through to exit */

out:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
			__func__, decode_hioctl(cmd), cmd, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
			__func__, decode_hioctl(cmd), cmd, rc);
	return rc;
}
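
/*
 * Worked example of the table indexing above (illustrative): the host ioctl
 * command numbers descend from that of HT_CXLFLASH_LUN_PROVISION, so
 * _IOC_NR(HT_CXLFLASH_LUN_PROVISION) - _IOC_NR(cmd) evaluates to 0 for
 * HT_CXLFLASH_LUN_PROVISION and 1 for HT_CXLFLASH_AFU_DEBUG, matching the
 * order of ioctl_tbl[] (hence the "order matters" note).
 */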
/*
 * Character device file operations
 */
static const struct file_operations cxlflash_chr_fops = {
	.owner          = THIS_MODULE,
	.open           = cxlflash_chr_open,
	.unlocked_ioctl	= cxlflash_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
/**
 * init_chrdev() - initialize the character device for the host
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_chrdev(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct device *char_dev;
	dev_t devno;
	int minor;
	int rc = 0;

	minor = cxlflash_get_minor();
	if (unlikely(minor < 0)) {
		dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
		rc = -ENOSPC;
		goto out;
	}

	devno = MKDEV(cxlflash_major, minor);
	cdev_init(&cfg->cdev, &cxlflash_chr_fops);

	rc = cdev_add(&cfg->cdev, devno, 1);
	if (rc) {
		dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
		goto err1;
	}

	char_dev = device_create(&cxlflash_class, NULL, devno,
				 NULL, "cxlflash%d", minor);
	if (IS_ERR(char_dev)) {
		rc = PTR_ERR(char_dev);
		dev_err(dev, "%s: device_create failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	cfg->chardev = char_dev;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	cdev_del(&cfg->cdev);
err1:
	cxlflash_put_minor(minor);
	goto out;
}
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * The device will initially start out in a 'probing' state and
 * transition to the 'normal' state at the end of a successful
 * probe. Should an EEH event occur during probe, the notification
 * thread (error_detected()) will wait until the probe handler
 * is nearly complete. At that time, the device will be moved to
 * a 'probed' state and the EEH thread woken up to drive the slot
 * reset and recovery (device moves to 'normal' state). Meanwhile,
 * the probe will be allowed to exit successfully.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;
	int k;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->state = STATE_PROBING;
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;
	cfg->ops = cxlflash_assign_ops(ddv);
	WARN_ON_ONCE(!cfg->ops);

	/*
	 * Promoted LUNs move to the top of the LUN table. The rest stay on
	 * the bottom half. The bottom half grows from the end (index = 255),
	 * whereas the top half grows from the beginning (index = 0).
	 *
	 * Initialize the last LUN index for all possible ports.
	 */
	cfg->promote_lun_index = 0;

	for (k = 0; k < MAX_FC_PORTS; k++)
		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	cfg->afu_cookie = cfg->ops->create_afu(pdev);
	if (unlikely(!cfg->afu_cookie)) {
		dev_err(dev, "%s: create_afu failed\n", __func__);
		rc = -ENOMEM;
		goto out_remove;
	}

	rc = init_afu(cfg);
	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

	rc = init_chrdev(cfg);
	if (rc) {
		dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_CDEV;

	if (wq_has_sleeper(&cfg->reset_waitq)) {
		cfg->state = STATE_PROBED;
		wake_up_all(&cfg->reset_waitq);
	} else
		cfg->state = STATE_NORMAL;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cfg->state = STATE_PROBED;
	cxlflash_remove(pdev);
	goto out;
}
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
					     cfg->state != STATE_PROBING);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		term_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}
/**
 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
 * @dev:	Character device.
 * @mode:	Mode that can be used to verify access.
 *
 * Return: Allocated string describing the devtmpfs structure.
 */
static char *cxlflash_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
}
/**
 * cxlflash_class_init() - create character device class
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_class_init(void)
{
	dev_t devno;
	int rc = 0;

	rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
	if (unlikely(rc)) {
		pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
		goto out;
	}

	cxlflash_major = MAJOR(devno);

	rc = class_register(&cxlflash_class);
	if (unlikely(rc)) {
		pr_err("%s: class_register failed rc=%d\n", __func__, rc);
		goto err;
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
err:
	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
	goto out;
}
/**
 * cxlflash_class_exit() - destroy character device class
 */
static void cxlflash_class_exit(void)
{
	dev_t devno = MKDEV(cxlflash_major, 0);

	class_unregister(&cxlflash_class);
	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
}
static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};
/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};
/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	int rc;

	cxlflash_list_init();
	rc = cxlflash_class_init();
	if (unlikely(rc))
		goto out;

	rc = pci_register_driver(&cxlflash_driver);
	if (unlikely(rc))
		goto err;
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
err:
	cxlflash_class_exit();
	goto out;
}
/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
	cxlflash_class_exit();
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);