// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 */
#include <linux/file.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>

#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"
/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC	0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type ocxlflash_fs_type = {
        .name		= "ocxlflash",
        .owner		= THIS_MODULE,
        .init_fs_context = ocxlflash_fs_init_fs_context,
        .kill_sb	= kill_anon_super,
};
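/*
 * Added note: the private pseudo filesystem mirrors what anon_inode provides,
 * but gives each adapter context its own inode (and hence its own
 * address_space) from alloc_anon_inode() below. simple_pin_fs() and
 * simple_release_fs() keep the mount alive for as long as at least one
 * context file exists.
 */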
/**
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx:	Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
        if (ctx->mapping)
                simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
        ctx->mapping = NULL;
}
/**
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev:	Generic device of the host.
 * @name:	Name of the pseudo filesystem.
 * @fops:	File operations.
 * @priv:	Private data.
 * @flags:	Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
                                      const struct file_operations *fops,
                                      void *priv, int flags)
{
        struct file *file;
        struct inode *inode;
        int rc;

        if (fops->owner && !try_module_get(fops->owner)) {
                dev_err(dev, "%s: Owner does not exist\n", __func__);
                rc = -ENOENT;
                goto err1;
        }

        rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
                           &ocxlflash_fs_cnt);
        if (unlikely(rc < 0)) {
                dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
                        __func__, rc);
                goto err2;
        }

        inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
        if (IS_ERR(inode)) {
                rc = PTR_ERR(inode);
                dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
                        __func__, rc);
                goto err3;
        }

        file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
                                 flags & (O_ACCMODE | O_NONBLOCK), fops);
        if (IS_ERR(file)) {
                rc = PTR_ERR(file);
                dev_err(dev, "%s: alloc_file failed rc=%d\n",
                        __func__, rc);
                goto err4;
        }

        file->private_data = priv;
out:
        return file;
err4:
        iput(inode);
err3:
        simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
        module_put(fops->owner);
err1:
        file = ERR_PTR(rc);
        goto out;
}
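/*
 * Added note: ocxlflash_getfile() is the helper behind ocxlflash_get_fd()
 * further below. A typical call looks roughly like:
 *
 *	file = ocxlflash_getfile(dev, "ocxlflash:42", &ocxl_afu_fops, ctx,
 *				 O_RDWR | O_CLOEXEC);
 *
 * (illustrative values only - the real name and flags are built in
 * ocxlflash_get_fd()).
 */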
/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie:	Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct device *dev = ctx->hw_afu->dev;

        mutex_lock(&ctx->state_mutex);
        if (ctx->state != STARTED) {
                dev_err(dev, "%s: Context not started, state=%d\n", __func__,
                        ctx->state);
                mutex_unlock(&ctx->state_mutex);
                return NULL;
        }
        mutex_unlock(&ctx->state_mutex);

        return ioremap(ctx->psn_phys, ctx->psn_size);
}
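/*
 * Added note: the ioremap() above is undone by ocxlflash_psa_unmap() below;
 * callers are expected to treat the two as a map/unmap pair around MMIO
 * access to the process specific area.
 */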
/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr:	MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
        iounmap(addr);
}
/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie:	Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;

        return ctx->pe;
}
/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
                       irq_handler_t handler, void *cookie, char *name)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        struct ocxlflash_irqs *irq;
        struct xive_irq_data *xd;
        u32 virq;
        int rc = 0;

        if (num < 0 || num >= ctx->num_irqs) {
                dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
                rc = -ENOENT;
                goto out;
        }

        irq = &ctx->irqs[num];
        virq = irq_create_mapping(NULL, irq->hwirq);
        if (unlikely(!virq)) {
                dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
                rc = -ENOMEM;
                goto out;
        }

        rc = request_irq(virq, handler, 0, name, cookie);
        if (unlikely(rc)) {
                dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
                goto err1;
        }

        xd = irq_get_handler_data(virq);
        if (unlikely(!xd)) {
                dev_err(dev, "%s: Can't get interrupt data\n", __func__);
                rc = -ENXIO;
                goto err2;
        }

        irq->virq = virq;
        irq->vtrig = xd->trig_mmio;
out:
        return rc;
err2:
        free_irq(virq, cookie);
err1:
        irq_dispose_mapping(virq);
        goto out;
}
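/*
 * Added note: besides wiring up the Linux virq, afu_map_irq() caches the XIVE
 * trigger page address (xd->trig_mmio) in irq->vtrig;
 * ocxlflash_get_irq_objhndl() later hands that address back as the object
 * handle for the interrupt.
 */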
/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
                                 irq_handler_t handler, void *cookie,
                                 char *name)
{
        return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}
/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
                          void *cookie)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        struct ocxlflash_irqs *irq;

        if (num < 0 || num >= ctx->num_irqs) {
                dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
                return;
        }

        irq = &ctx->irqs[num];

        if (irq_find_mapping(NULL, irq->hwirq)) {
                free_irq(irq->virq, cookie);
                irq_dispose_mapping(irq->virq);
        }

        memset(irq, 0, sizeof(*irq));
}
/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
        return afu_unmap_irq(0, ctx_cookie, num, cookie);
}
/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie:	Context associated with the interrupt.
 * @irq:	Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
        struct ocxlflash_context *ctx = ctx_cookie;

        if (irq < 0 || irq >= ctx->num_irqs)
                return 0;

        return (__force u64)ctx->irqs[irq].vtrig;
}
/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data:	Private data provided at callback registration, the context.
 * @addr:	Address that triggered the error.
 * @dsisr:	Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
        struct ocxlflash_context *ctx = data;

        spin_lock(&ctx->slock);
        ctx->fault_addr = addr;
        ctx->fault_dsisr = dsisr;
        ctx->pending_fault = true;
        spin_unlock(&ctx->slock);

        wake_up_all(&ctx->wq);
}
/**
 * start_context() - local routine to start a context
 * @ctx:	Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct ocxl_afu_config *acfg = &afu->acfg;
        void *link_token = afu->link_token;
        struct pci_dev *pdev = afu->pdev;
        struct device *dev = afu->dev;
        bool master = ctx->master;
        struct mm_struct *mm;
        int rc = 0;
        u32 pid;

        mutex_lock(&ctx->state_mutex);
        if (ctx->state != OPENED) {
                dev_err(dev, "%s: Context state invalid, state=%d\n",
                        __func__, ctx->state);
                rc = -EINVAL;
                goto out;
        }

        if (master) {
                ctx->psn_size = acfg->global_mmio_size;
                ctx->psn_phys = afu->gmmio_phys;
        } else {
                ctx->psn_size = acfg->pp_mmio_stride;
                ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
        }

        /* pid and mm not set for master contexts */
        if (master) {
                pid = 0;
                mm = NULL;
        } else {
                pid = current->mm->context.id;
                mm = current->mm;
        }

        rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
                              pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
                              ctx);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        ctx->state = STARTED;
out:
        mutex_unlock(&ctx->state_mutex);
        return rc;
}
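/*
 * Added note: master contexts are given the AFU global MMIO window, while
 * regular contexts get one per-process stride of the per-PE MMIO area,
 * offset by their process element (ctx->pe). The pid/mm are only passed to
 * ocxl_link_add_pe() for non-master contexts.
 */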
/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie:	Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;

        return start_context(ctx);
}
/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie:	Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct ocxl_afu_config *acfg = &afu->acfg;
        struct pci_dev *pdev = afu->pdev;
        struct device *dev = afu->dev;
        enum ocxlflash_ctx_state state;
        int rc = 0;

        mutex_lock(&ctx->state_mutex);
        state = ctx->state;
        ctx->state = CLOSED;
        mutex_unlock(&ctx->state_mutex);
        if (state != STARTED)
                goto out;

        rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
                                         ctx->pe);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
                        __func__, rc);
                /* If EBUSY, PE could be referenced in future by the AFU */
                if (rc == -EBUSY)
                        goto out;
        }

        rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
                        __func__, rc);
                goto out;
        }
out:
        return rc;
}
/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie:	Adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct device *dev = ctx->hw_afu->dev;

        /* Pending implementation from OCXL transport services */
        dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

        /* Silently return success until it is implemented */
        return 0;
}
/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie:	Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;

        ctx->master = true;
}
/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: returns the pointer to host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
        struct ocxl_hw_afu *afu = afu_cookie;

        return afu->ocxl_ctx;
}
/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: returns the adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
        struct ocxl_hw_afu *afu = afu_cookie;
        struct device *dev = afu->dev;
        struct ocxlflash_context *ctx;
        int rc;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
                dev_err(dev, "%s: Context allocation failed\n", __func__);
                rc = -ENOMEM;
                goto err1;
        }

        idr_preload(GFP_KERNEL);
        rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
        idr_preload_end();
        if (unlikely(rc < 0)) {
                dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
                goto err2;
        }

        spin_lock_init(&ctx->slock);
        init_waitqueue_head(&ctx->wq);
        mutex_init(&ctx->state_mutex);

        ctx->state = OPENED;
        ctx->pe = rc;
        ctx->master = false;
        ctx->mapping = NULL;
        ctx->hw_afu = afu;
        ctx->irq_bitmap = 0;
        ctx->pending_irq = false;
        ctx->pending_fault = false;
out:
        return ctx;
err2:
        kfree(ctx);
err1:
        ctx = ERR_PTR(rc);
        goto out;
}
/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie:	Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct device *dev;
        int rc = 0;

        if (!ctx)
                goto out;

        dev = ctx->hw_afu->dev;
        mutex_lock(&ctx->state_mutex);
        if (ctx->state >= STARTED) {
                dev_err(dev, "%s: Context in use, state=%d\n", __func__,
                        ctx->state);
                mutex_unlock(&ctx->state_mutex);
                rc = -EBUSY;
                goto out;
        }
        mutex_unlock(&ctx->state_mutex);

        idr_remove(&ctx->hw_afu->idr, ctx->pe);
        ocxlflash_release_mapping(ctx);
        kfree(ctx);
out:
        return rc;
}
/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie:	Hardware AFU associated with the host.
 * @image:	Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
        struct ocxl_hw_afu *afu = afu_cookie;

        afu->perst_same_image = image;
}
/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev:	PCI device associated with the host.
 * @buf:	Buffer to get the VPD data.
 * @count:	Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
                                          size_t count)
{
        return pci_read_vpd(pdev, 0, count, buf);
}
/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx:	Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        int i;

        if (!ctx->irqs) {
                dev_err(dev, "%s: Interrupts not allocated\n", __func__);
                return;
        }

        for (i = ctx->num_irqs; i >= 0; i--)
                ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

        kfree(ctx->irqs);
        ctx->irqs = NULL;
}
/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        struct ocxlflash_irqs *irqs;
        int hwirq;
        int rc = 0;
        int i;

        if (ctx->irqs) {
                dev_err(dev, "%s: Interrupts already allocated\n", __func__);
                rc = -EEXIST;
                goto out;
        }

        if (num > OCXL_MAX_IRQS) {
                dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
                rc = -EINVAL;
                goto out;
        }

        irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
        if (unlikely(!irqs)) {
                dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
                rc = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num; i++) {
                rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
                if (unlikely(rc)) {
                        dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
                                __func__, rc);
                        goto err;
                }

                irqs[i].hwirq = hwirq;
        }

        ctx->irqs = irqs;
        ctx->num_irqs = num;
out:
        return rc;
err:
        for (i = i-1; i >= 0; i--)
                ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
        kfree(irqs);
        goto out;
}
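/*
 * Added note: alloc_afu_irqs() only reserves hardware interrupt sources via
 * ocxl_link_irq_alloc(); the Linux virq mapping and handler registration
 * happen later, per interrupt, in afu_map_irq().
 */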
/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
        return alloc_afu_irqs(ctx_cookie, num);
}
/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie:	Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
        free_afu_irqs(ctx_cookie);
}
/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
        if (afu->gmmio_virt) {
                iounmap(afu->gmmio_virt);
                afu->gmmio_virt = NULL;
        }
}
/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie:	AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
        struct ocxl_hw_afu *afu = afu_cookie;
        int pos;

        if (!afu)
                return;

        ocxlflash_release_context(afu->ocxl_ctx);
        idr_destroy(&afu->idr);

        /* Disable the AFU */
        pos = afu->acfg.dvsec_afu_control_pos;
        ocxl_config_set_afu_state(afu->pdev, pos, 0);

        ocxlflash_unconfig_afu(afu);
        kfree(afu);
}
/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
        struct ocxl_fn_config *fcfg = &afu->fcfg;
        struct device *dev = &pdev->dev;
        u16 base, enabled, supported;
        int rc = 0;

        /* Read DVSEC config of the function */
        rc = ocxl_config_read_function(pdev, fcfg);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        /* Check if function has AFUs defined, only 1 per function supported */
        if (fcfg->max_afu_index >= 0) {
                afu->is_present = true;
                if (fcfg->max_afu_index != 0)
                        dev_warn(dev, "%s: Unexpected AFU index value %d\n",
                                 __func__, fcfg->max_afu_index);
        }

        rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        afu->fn_actag_base = base;
        afu->fn_actag_enabled = enabled;

        ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
        dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
                __func__, base, enabled);

        rc = ocxl_link_setup(pdev, 0, &afu->link_token);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
                        __func__, rc);
                goto err;
        }
out:
        return rc;
err:
        ocxl_link_release(pdev, afu->link_token);
        goto out;
}
/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
        ocxl_link_release(pdev, afu->link_token);
}
/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
        struct ocxl_afu_config *acfg = &afu->acfg;
        struct pci_dev *pdev = afu->pdev;
        struct device *dev = afu->dev;
        phys_addr_t gmmio, ppmmio;
        int rc;

        rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
        if (unlikely(rc)) {
                dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
                        __func__, rc);
                goto out;
        }
        gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
        gmmio += acfg->global_mmio_offset;

        rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
        if (unlikely(rc)) {
                dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
                        __func__, rc);
                goto err1;
        }
        ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
        ppmmio += acfg->pp_mmio_offset;

        afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
        if (unlikely(!afu->gmmio_virt)) {
                dev_err(dev, "%s: MMIO mapping failed\n", __func__);
                rc = -ENOMEM;
                goto err2;
        }

        afu->gmmio_phys = gmmio;
        afu->ppmmio_phys = ppmmio;
out:
        return rc;
err2:
        pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
        pci_release_region(pdev, acfg->global_mmio_bar);
        goto out;
}
/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
        struct ocxl_afu_config *acfg = &afu->acfg;
        struct ocxl_fn_config *fcfg = &afu->fcfg;
        struct device *dev = &pdev->dev;
        int count;
        int base;
        int pos;
        int rc = 0;

        /* This HW AFU function does not have any AFUs defined */
        if (!afu->is_present)
                goto out;

        /* Read AFU config at index 0 */
        rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        /* Only one AFU per function is supported, so actag_base is same */
        base = afu->fn_actag_base;
        count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
        pos = acfg->dvsec_afu_control_pos;

        ocxl_config_set_afu_actag(pdev, pos, base, count);
        dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
        afu->afu_actag_base = base;
        afu->afu_actag_enabled = count;
        afu->max_pasid = 1 << acfg->pasid_supported_log;

        ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

        rc = ocxlflash_map_mmio(afu);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        /* Enable the AFU */
        ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
        return rc;
}
/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev:	PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        struct ocxlflash_context *ctx;
        struct ocxl_hw_afu *afu;
        int rc;

        afu = kzalloc(sizeof(*afu), GFP_KERNEL);
        if (unlikely(!afu)) {
                dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
                goto out;
        }

        afu->pdev = pdev;
        afu->dev = dev;
        idr_init(&afu->idr);

        rc = ocxlflash_config_fn(pdev, afu);
        if (unlikely(rc)) {
                dev_err(dev, "%s: Function configuration failed rc=%d\n",
                        __func__, rc);
                goto err1;
        }

        rc = ocxlflash_config_afu(pdev, afu);
        if (unlikely(rc)) {
                dev_err(dev, "%s: AFU configuration failed rc=%d\n",
                        __func__, rc);
                goto err2;
        }

        ctx = ocxlflash_dev_context_init(pdev, afu);
        if (IS_ERR(ctx)) {
                rc = PTR_ERR(ctx);
                dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
                        __func__, rc);
                goto err3;
        }

        afu->ocxl_ctx = ctx;
out:
        return afu;
err3:
        ocxlflash_unconfig_afu(afu);
err2:
        ocxlflash_unconfig_fn(pdev, afu);
err1:
        idr_destroy(&afu->idr);
        kfree(afu);
        afu = NULL;
        goto out;
}
/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx:	Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
        if (ctx->pending_irq || ctx->pending_fault)
                return true;

        return false;
}
/**
 * afu_poll() - poll the AFU for events on the context
 * @file:	File associated with the adapter context.
 * @poll:	Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
        struct ocxlflash_context *ctx = file->private_data;
        struct device *dev = ctx->hw_afu->dev;
        ulong lock_flags;
        int mask = 0;

        poll_wait(file, &ctx->wq, poll);

        spin_lock_irqsave(&ctx->slock, lock_flags);
        if (ctx_event_pending(ctx))
                mask |= POLLIN | POLLRDNORM;
        else if (ctx->state == CLOSED)
                mask |= POLLERR;
        spin_unlock_irqrestore(&ctx->slock, lock_flags);

        dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
                __func__, ctx->pe, mask);

        return mask;
}
/**
 * afu_read() - perform a read on the context for any event
 * @file:	File associated with the adapter context.
 * @buf:	Buffer to receive the data.
 * @count:	Size of buffer (maximum bytes that can be read).
 * @off:	Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
                        loff_t *off)
{
        struct ocxlflash_context *ctx = file->private_data;
        struct device *dev = ctx->hw_afu->dev;
        struct cxl_event event;
        ulong lock_flags;
        ssize_t esize;
        ssize_t rc;
        int bit;
        DEFINE_WAIT(event_wait);

        if (*off != 0) {
                dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
                        __func__, *off);
                rc = -EINVAL;
                goto out;
        }

        spin_lock_irqsave(&ctx->slock, lock_flags);

        for (;;) {
                prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

                if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
                        break;

                if (file->f_flags & O_NONBLOCK) {
                        dev_err(dev, "%s: File cannot be blocked on I/O\n",
                                __func__);
                        rc = -EAGAIN;
                        goto err;
                }

                if (signal_pending(current)) {
                        dev_err(dev, "%s: Signal pending on the process\n",
                                __func__);
                        rc = -ERESTARTSYS;
                        goto err;
                }

                spin_unlock_irqrestore(&ctx->slock, lock_flags);
                schedule();
                spin_lock_irqsave(&ctx->slock, lock_flags);
        }

        finish_wait(&ctx->wq, &event_wait);

        memset(&event, 0, sizeof(event));
        event.header.process_element = ctx->pe;
        event.header.size = sizeof(struct cxl_event_header);
        if (ctx->pending_irq) {
                esize = sizeof(struct cxl_event_afu_interrupt);
                event.header.size += esize;
                event.header.type = CXL_EVENT_AFU_INTERRUPT;

                bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
                clear_bit(bit, &ctx->irq_bitmap);
                event.irq.irq = bit + 1;
                if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
                        ctx->pending_irq = false;
        } else if (ctx->pending_fault) {
                event.header.size += sizeof(struct cxl_event_data_storage);
                event.header.type = CXL_EVENT_DATA_STORAGE;
                event.fault.addr = ctx->fault_addr;
                event.fault.dsisr = ctx->fault_dsisr;
                ctx->pending_fault = false;
        }

        spin_unlock_irqrestore(&ctx->slock, lock_flags);

        if (copy_to_user(buf, &event, event.header.size)) {
                dev_err(dev, "%s: copy_to_user failed\n", __func__);
                rc = -EFAULT;
                goto out;
        }

        rc = event.header.size;
out:
        return rc;
err:
        finish_wait(&ctx->wq, &event_wait);
        spin_unlock_irqrestore(&ctx->slock, lock_flags);
        goto out;
}
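/*
 * Added note: the event returned to userspace is a struct cxl_event, i.e. a
 * cxl_event_header followed by either an AFU interrupt or a data storage
 * (fault) payload, with header.size adjusted to cover whichever payload was
 * filled in. AFU interrupts are reported 1-based (event.irq.irq = bit + 1).
 */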
/**
 * afu_release() - release and free the context
 * @inode:	File inode pointer.
 * @file:	File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
        struct ocxlflash_context *ctx = file->private_data;
        int i;

        /* Unmap and free the interrupts associated with the context */
        for (i = ctx->num_irqs; i >= 0; i--)
                afu_unmap_irq(0, ctx, i, ctx);
        free_afu_irqs(ctx);

        return ocxlflash_release_context(ctx);
}
/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf:	VM fault associated with current fault.
 *
 * Return: 0 on success, -errno on failure
 */
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ocxlflash_context *ctx = vma->vm_file->private_data;
        struct device *dev = ctx->hw_afu->dev;
        u64 mmio_area, offset;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= ctx->psn_size)
                return VM_FAULT_SIGBUS;

        mutex_lock(&ctx->state_mutex);
        if (ctx->state != STARTED) {
                dev_err(dev, "%s: Context not started, state=%d\n",
                        __func__, ctx->state);
                mutex_unlock(&ctx->state_mutex);
                return VM_FAULT_SIGBUS;
        }
        mutex_unlock(&ctx->state_mutex);

        mmio_area = ctx->psn_phys;
        mmio_area += offset;

        return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}
static const struct vm_operations_struct ocxlflash_vmops = {
        .fault = ocxlflash_mmap_fault,
};
/**
 * afu_mmap() - map the fault handler operations
 * @file:	File associated with the context.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct ocxlflash_context *ctx = file->private_data;

        if ((vma_pages(vma) + vma->vm_pgoff) >
            (ctx->psn_size >> PAGE_SHIFT))
                return -EINVAL;

        vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxlflash_vmops;
        return 0;
}
static const struct file_operations ocxl_afu_fops = {
        .owner		= THIS_MODULE,
        .poll		= afu_poll,
        .read		= afu_read,
        .release	= afu_release,
        .mmap		= afu_mmap,
};
#define PATCH_FOPS(NAME)						\
        do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
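/*
 * Added note: PATCH_FOPS() fills in only the file operations the caller left
 * NULL. For example, PATCH_FOPS(poll) expands to:
 *
 *	do { if (!fops->poll) fops->poll = ocxl_afu_fops.poll; } while (0)
 *
 * so a caller-supplied fops table keeps its own handlers and inherits the
 * defaults for the rest.
 */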
/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie:	Adapter context.
 * @fops:	File operations to be associated.
 * @fd:	File descriptor to be returned back.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
                                     struct file_operations *fops, int *fd)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct device *dev = ctx->hw_afu->dev;
        struct file *file;
        int flags, fdtmp;
        int rc = 0;
        char *name = NULL;

        /* Only allow one fd per context */
        if (ctx->mapping) {
                dev_err(dev, "%s: Context is already mapped to an fd\n",
                        __func__);
                rc = -EEXIST;
                goto err1;
        }

        flags = O_RDWR | O_CLOEXEC;

        /* This code is similar to anon_inode_getfd() */
        rc = get_unused_fd_flags(flags);
        if (unlikely(rc < 0)) {
                dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
                        __func__, rc);
                goto err1;
        }
        fdtmp = rc;

        /* Patch the file ops that are not defined */
        if (fops) {
                PATCH_FOPS(poll);
                PATCH_FOPS(read);
                PATCH_FOPS(release);
                PATCH_FOPS(mmap);
        } else /* Use default ops */
                fops = (struct file_operations *)&ocxl_afu_fops;

        name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
        file = ocxlflash_getfile(dev, name, fops, ctx, flags);
        kfree(name);
        if (IS_ERR(file)) {
                rc = PTR_ERR(file);
                dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
                        __func__, rc);
                goto err2;
        }

        ctx->mapping = file->f_mapping;
        *fd = fdtmp;
out:
        return file;
err2:
        put_unused_fd(fdtmp);
err1:
        file = ERR_PTR(rc);
        goto out;
}
/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file:	File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
        return file->private_data;
}
/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the context.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
        struct ocxlflash_context *ctx = data;
        struct device *dev = ctx->hw_afu->dev;
        int i;

        dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
                __func__, ctx->pe, irq);

        for (i = 0; i < ctx->num_irqs; i++) {
                if (ctx->irqs[i].virq == irq)
                        break;
        }
        if (unlikely(i >= ctx->num_irqs)) {
                dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
                goto out;
        }

        spin_lock(&ctx->slock);
        set_bit(i - 1, &ctx->irq_bitmap);
        ctx->pending_irq = true;
        spin_unlock(&ctx->slock);

        wake_up_all(&ctx->wq);
out:
        return IRQ_HANDLED;
}
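/*
 * Added note: the handler only records which per-context interrupt fired
 * (irq_bitmap, pending_irq) under ctx->slock and wakes any poll()/read()
 * waiters; afu_read() then consumes one bit per read and reports it to
 * userspace via event.irq.irq.
 */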
/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie:	Context to be started.
 * @num_irqs:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        char *name;
        int rc = 0;
        int i;

        rc = alloc_afu_irqs(ctx, num_irqs);
        if (unlikely(rc < 0)) {
                dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
                goto out;
        }

        for (i = 0; i < num_irqs; i++) {
                name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
                                 dev_name(dev), ctx->pe, i);
                rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
                kfree(name);
                if (unlikely(rc < 0)) {
                        dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
                                __func__, rc);
                        goto err;
                }
        }

        rc = start_context(ctx);
        if (unlikely(rc)) {
                dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
                goto err;
        }
out:
        return rc;
err:
        for (i = i-1; i >= 0; i--)
                afu_unmap_irq(0, ctx, i, ctx);
        free_afu_irqs(ctx);
        goto out;
}
/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
        return afu_mmap(file, vma);
}
/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode:	File inode pointer.
 * @file:	File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
        return afu_release(inode, file);
}
/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
        .module			= THIS_MODULE,
        .psa_map		= ocxlflash_psa_map,
        .psa_unmap		= ocxlflash_psa_unmap,
        .process_element	= ocxlflash_process_element,
        .map_afu_irq		= ocxlflash_map_afu_irq,
        .unmap_afu_irq		= ocxlflash_unmap_afu_irq,
        .get_irq_objhndl	= ocxlflash_get_irq_objhndl,
        .start_context		= ocxlflash_start_context,
        .stop_context		= ocxlflash_stop_context,
        .afu_reset		= ocxlflash_afu_reset,
        .set_master		= ocxlflash_set_master,
        .get_context		= ocxlflash_get_context,
        .dev_context_init	= ocxlflash_dev_context_init,
        .release_context	= ocxlflash_release_context,
        .perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
        .read_adapter_vpd	= ocxlflash_read_adapter_vpd,
        .allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
        .free_afu_irqs		= ocxlflash_free_afu_irqs,
        .create_afu		= ocxlflash_create_afu,
        .destroy_afu		= ocxlflash_destroy_afu,
        .get_fd			= ocxlflash_get_fd,
        .fops_get_context	= ocxlflash_fops_get_context,
        .start_work		= ocxlflash_start_work,
        .fd_mmap		= ocxlflash_fd_mmap,
        .fd_release		= ocxlflash_fd_release,
};
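/*
 * Added note: cxlflash_ocxl_ops is the OCXL flavour of the backend interface
 * declared in backend.h; the core cxlflash driver presumably selects this
 * table (rather than the CXL one) when it binds to an OpenCAPI device and
 * reaches the transport only through these callbacks.
 */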