1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * CXL Flash Device Driver
5 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
8 * Copyright (C) 2018 IBM Corporation
11 #include <linux/file.h>
12 #include <linux/idr.h>
13 #include <linux/module.h>
14 #include <linux/mount.h>
15 #include <linux/pseudo_fs.h>
16 #include <linux/poll.h>
17 #include <linux/sched/signal.h>
18 #include <linux/interrupt.h>
20 #include <misc/ocxl.h>
22 #include <uapi/misc/cxl.h>
28 * Pseudo-filesystem to allocate inodes.
31 #define OCXLFLASH_FS_MAGIC 0x1697698f
33 static int ocxlflash_fs_cnt
;
34 static struct vfsmount
*ocxlflash_vfs_mount
;
36 static int ocxlflash_fs_init_fs_context(struct fs_context
*fc
)
38 return init_pseudo(fc
, OCXLFLASH_FS_MAGIC
) ? 0 : -ENOMEM
;
41 static struct file_system_type ocxlflash_fs_type
= {
44 .init_fs_context
= ocxlflash_fs_init_fs_context
,
45 .kill_sb
= kill_anon_super
,
49 * ocxlflash_release_mapping() - release the memory mapping
50 * @ctx: Context whose mapping is to be released.
52 static void ocxlflash_release_mapping(struct ocxlflash_context
*ctx
)
55 simple_release_fs(&ocxlflash_vfs_mount
, &ocxlflash_fs_cnt
);
60 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
61 * @dev: Generic device of the host.
62 * @name: Name of the pseudo filesystem.
63 * @fops: File operations.
64 * @priv: Private data.
65 * @flags: Flags for the file.
67 * Return: pointer to the file on success, ERR_PTR on failure
69 static struct file
*ocxlflash_getfile(struct device
*dev
, const char *name
,
70 const struct file_operations
*fops
,
71 void *priv
, int flags
)
77 if (fops
->owner
&& !try_module_get(fops
->owner
)) {
78 dev_err(dev
, "%s: Owner does not exist\n", __func__
);
83 rc
= simple_pin_fs(&ocxlflash_fs_type
, &ocxlflash_vfs_mount
,
85 if (unlikely(rc
< 0)) {
86 dev_err(dev
, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
91 inode
= alloc_anon_inode(ocxlflash_vfs_mount
->mnt_sb
);
94 dev_err(dev
, "%s: alloc_anon_inode failed rc=%d\n",
99 file
= alloc_file_pseudo(inode
, ocxlflash_vfs_mount
, name
,
100 flags
& (O_ACCMODE
| O_NONBLOCK
), fops
);
103 dev_err(dev
, "%s: alloc_file failed rc=%d\n",
108 file
->private_data
= priv
;
114 simple_release_fs(&ocxlflash_vfs_mount
, &ocxlflash_fs_cnt
);
116 module_put(fops
->owner
);
123 * ocxlflash_psa_map() - map the process specific MMIO space
124 * @ctx_cookie: Adapter context for which the mapping needs to be done.
126 * Return: MMIO pointer of the mapped region
128 static void __iomem
*ocxlflash_psa_map(void *ctx_cookie
)
130 struct ocxlflash_context
*ctx
= ctx_cookie
;
131 struct device
*dev
= ctx
->hw_afu
->dev
;
133 mutex_lock(&ctx
->state_mutex
);
134 if (ctx
->state
!= STARTED
) {
135 dev_err(dev
, "%s: Context not started, state=%d\n", __func__
,
137 mutex_unlock(&ctx
->state_mutex
);
140 mutex_unlock(&ctx
->state_mutex
);
142 return ioremap(ctx
->psn_phys
, ctx
->psn_size
);
146 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
147 * @addr: MMIO pointer to unmap.
149 static void ocxlflash_psa_unmap(void __iomem
*addr
)
155 * ocxlflash_process_element() - get process element of the adapter context
156 * @ctx_cookie: Adapter context associated with the process element.
158 * Return: process element of the adapter context
160 static int ocxlflash_process_element(void *ctx_cookie
)
162 struct ocxlflash_context
*ctx
= ctx_cookie
;
168 * afu_map_irq() - map the interrupt of the adapter context
170 * @ctx: Adapter context.
171 * @num: Per-context AFU interrupt number.
172 * @handler: Interrupt handler to register.
173 * @cookie: Interrupt handler private data.
174 * @name: Name of the interrupt.
176 * Return: 0 on success, -errno on failure
178 static int afu_map_irq(u64 flags
, struct ocxlflash_context
*ctx
, int num
,
179 irq_handler_t handler
, void *cookie
, char *name
)
181 struct ocxl_hw_afu
*afu
= ctx
->hw_afu
;
182 struct device
*dev
= afu
->dev
;
183 struct ocxlflash_irqs
*irq
;
184 struct xive_irq_data
*xd
;
188 if (num
< 0 || num
>= ctx
->num_irqs
) {
189 dev_err(dev
, "%s: Interrupt %d not allocated\n", __func__
, num
);
194 irq
= &ctx
->irqs
[num
];
195 virq
= irq_create_mapping(NULL
, irq
->hwirq
);
196 if (unlikely(!virq
)) {
197 dev_err(dev
, "%s: irq_create_mapping failed\n", __func__
);
202 rc
= request_irq(virq
, handler
, 0, name
, cookie
);
204 dev_err(dev
, "%s: request_irq failed rc=%d\n", __func__
, rc
);
208 xd
= irq_get_handler_data(virq
);
210 dev_err(dev
, "%s: Can't get interrupt data\n", __func__
);
216 irq
->vtrig
= xd
->trig_mmio
;
220 free_irq(virq
, cookie
);
222 irq_dispose_mapping(virq
);
227 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
228 * @ctx_cookie: Adapter context.
229 * @num: Per-context AFU interrupt number.
230 * @handler: Interrupt handler to register.
231 * @cookie: Interrupt handler private data.
232 * @name: Name of the interrupt.
234 * Return: 0 on success, -errno on failure
236 static int ocxlflash_map_afu_irq(void *ctx_cookie
, int num
,
237 irq_handler_t handler
, void *cookie
,
240 return afu_map_irq(0, ctx_cookie
, num
, handler
, cookie
, name
);
244 * afu_unmap_irq() - unmap the interrupt
246 * @ctx: Adapter context.
247 * @num: Per-context AFU interrupt number.
248 * @cookie: Interrupt handler private data.
250 static void afu_unmap_irq(u64 flags
, struct ocxlflash_context
*ctx
, int num
,
253 struct ocxl_hw_afu
*afu
= ctx
->hw_afu
;
254 struct device
*dev
= afu
->dev
;
255 struct ocxlflash_irqs
*irq
;
257 if (num
< 0 || num
>= ctx
->num_irqs
) {
258 dev_err(dev
, "%s: Interrupt %d not allocated\n", __func__
, num
);
262 irq
= &ctx
->irqs
[num
];
264 if (irq_find_mapping(NULL
, irq
->hwirq
)) {
265 free_irq(irq
->virq
, cookie
);
266 irq_dispose_mapping(irq
->virq
);
269 memset(irq
, 0, sizeof(*irq
));
/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @cookie: Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	/*
	 * A plain call replaces the previous "return <void expression>;",
	 * which is only accepted as a compiler extension in C; behavior is
	 * identical.
	 */
	afu_unmap_irq(0, ctx_cookie, num, cookie);
}
284 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
285 * @ctx_cookie: Context associated with the interrupt.
286 * @irq: Interrupt number.
288 * Return: effective address of the mapped region
290 static u64
ocxlflash_get_irq_objhndl(void *ctx_cookie
, int irq
)
292 struct ocxlflash_context
*ctx
= ctx_cookie
;
294 if (irq
< 0 || irq
>= ctx
->num_irqs
)
297 return (__force u64
)ctx
->irqs
[irq
].vtrig
;
301 * ocxlflash_xsl_fault() - callback when translation error is triggered
302 * @data: Private data provided at callback registration, the context.
303 * @addr: Address that triggered the error.
304 * @dsisr: Value of dsisr register.
306 static void ocxlflash_xsl_fault(void *data
, u64 addr
, u64 dsisr
)
308 struct ocxlflash_context
*ctx
= data
;
310 spin_lock(&ctx
->slock
);
311 ctx
->fault_addr
= addr
;
312 ctx
->fault_dsisr
= dsisr
;
313 ctx
->pending_fault
= true;
314 spin_unlock(&ctx
->slock
);
316 wake_up_all(&ctx
->wq
);
320 * start_context() - local routine to start a context
321 * @ctx: Adapter context to be started.
323 * Assign the context specific MMIO space, add and enable the PE.
325 * Return: 0 on success, -errno on failure
327 static int start_context(struct ocxlflash_context
*ctx
)
329 struct ocxl_hw_afu
*afu
= ctx
->hw_afu
;
330 struct ocxl_afu_config
*acfg
= &afu
->acfg
;
331 void *link_token
= afu
->link_token
;
332 struct pci_dev
*pdev
= afu
->pdev
;
333 struct device
*dev
= afu
->dev
;
334 bool master
= ctx
->master
;
335 struct mm_struct
*mm
;
339 mutex_lock(&ctx
->state_mutex
);
340 if (ctx
->state
!= OPENED
) {
341 dev_err(dev
, "%s: Context state invalid, state=%d\n",
342 __func__
, ctx
->state
);
348 ctx
->psn_size
= acfg
->global_mmio_size
;
349 ctx
->psn_phys
= afu
->gmmio_phys
;
351 ctx
->psn_size
= acfg
->pp_mmio_stride
;
352 ctx
->psn_phys
= afu
->ppmmio_phys
+ (ctx
->pe
* ctx
->psn_size
);
355 /* pid and mm not set for master contexts */
360 pid
= current
->mm
->context
.id
;
364 rc
= ocxl_link_add_pe(link_token
, ctx
->pe
, pid
, 0, 0,
365 pci_dev_id(pdev
), mm
, ocxlflash_xsl_fault
,
368 dev_err(dev
, "%s: ocxl_link_add_pe failed rc=%d\n",
373 ctx
->state
= STARTED
;
375 mutex_unlock(&ctx
->state_mutex
);
/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie: Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	/* Delegate directly to the local helper */
	return start_context(ctx_cookie);
}
393 * ocxlflash_stop_context() - stop a context
394 * @ctx_cookie: Adapter context to be stopped.
396 * Return: 0 on success, -errno on failure
398 static int ocxlflash_stop_context(void *ctx_cookie
)
400 struct ocxlflash_context
*ctx
= ctx_cookie
;
401 struct ocxl_hw_afu
*afu
= ctx
->hw_afu
;
402 struct ocxl_afu_config
*acfg
= &afu
->acfg
;
403 struct pci_dev
*pdev
= afu
->pdev
;
404 struct device
*dev
= afu
->dev
;
405 enum ocxlflash_ctx_state state
;
408 mutex_lock(&ctx
->state_mutex
);
411 mutex_unlock(&ctx
->state_mutex
);
412 if (state
!= STARTED
)
415 rc
= ocxl_config_terminate_pasid(pdev
, acfg
->dvsec_afu_control_pos
,
418 dev_err(dev
, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
420 /* If EBUSY, PE could be referenced in future by the AFU */
425 rc
= ocxl_link_remove_pe(afu
->link_token
, ctx
->pe
);
427 dev_err(dev
, "%s: ocxl_link_remove_pe failed rc=%d\n",
436 * ocxlflash_afu_reset() - reset the AFU
437 * @ctx_cookie: Adapter context.
439 static int ocxlflash_afu_reset(void *ctx_cookie
)
441 struct ocxlflash_context
*ctx
= ctx_cookie
;
442 struct device
*dev
= ctx
->hw_afu
->dev
;
444 /* Pending implementation from OCXL transport services */
445 dev_err_once(dev
, "%s: afu_reset() fop not supported\n", __func__
);
447 /* Silently return success until it is implemented */
452 * ocxlflash_set_master() - sets the context as master
453 * @ctx_cookie: Adapter context to set as master.
455 static void ocxlflash_set_master(void *ctx_cookie
)
457 struct ocxlflash_context
*ctx
= ctx_cookie
;
463 * ocxlflash_get_context() - obtains the context associated with the host
464 * @pdev: PCI device associated with the host.
465 * @afu_cookie: Hardware AFU associated with the host.
467 * Return: returns the pointer to host adapter context
469 static void *ocxlflash_get_context(struct pci_dev
*pdev
, void *afu_cookie
)
471 struct ocxl_hw_afu
*afu
= afu_cookie
;
473 return afu
->ocxl_ctx
;
477 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
478 * @pdev: PCI device associated with the host.
479 * @afu_cookie: Hardware AFU associated with the host.
481 * Return: returns the adapter context on success, ERR_PTR on failure
483 static void *ocxlflash_dev_context_init(struct pci_dev
*pdev
, void *afu_cookie
)
485 struct ocxl_hw_afu
*afu
= afu_cookie
;
486 struct device
*dev
= afu
->dev
;
487 struct ocxlflash_context
*ctx
;
490 ctx
= kzalloc(sizeof(*ctx
), GFP_KERNEL
);
491 if (unlikely(!ctx
)) {
492 dev_err(dev
, "%s: Context allocation failed\n", __func__
);
497 idr_preload(GFP_KERNEL
);
498 rc
= idr_alloc(&afu
->idr
, ctx
, 0, afu
->max_pasid
, GFP_NOWAIT
);
500 if (unlikely(rc
< 0)) {
501 dev_err(dev
, "%s: idr_alloc failed rc=%d\n", __func__
, rc
);
505 spin_lock_init(&ctx
->slock
);
506 init_waitqueue_head(&ctx
->wq
);
507 mutex_init(&ctx
->state_mutex
);
515 ctx
->pending_irq
= false;
516 ctx
->pending_fault
= false;
527 * ocxlflash_release_context() - releases an adapter context
528 * @ctx_cookie: Adapter context to be released.
530 * Return: 0 on success, -errno on failure
532 static int ocxlflash_release_context(void *ctx_cookie
)
534 struct ocxlflash_context
*ctx
= ctx_cookie
;
541 dev
= ctx
->hw_afu
->dev
;
542 mutex_lock(&ctx
->state_mutex
);
543 if (ctx
->state
>= STARTED
) {
544 dev_err(dev
, "%s: Context in use, state=%d\n", __func__
,
546 mutex_unlock(&ctx
->state_mutex
);
550 mutex_unlock(&ctx
->state_mutex
);
552 idr_remove(&ctx
->hw_afu
->idr
, ctx
->pe
);
553 ocxlflash_release_mapping(ctx
);
560 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
561 * @afu_cookie: Hardware AFU associated with the host.
562 * @image: Whether to load the same image on PERST.
564 static void ocxlflash_perst_reloads_same_image(void *afu_cookie
, bool image
)
566 struct ocxl_hw_afu
*afu
= afu_cookie
;
568 afu
->perst_same_image
= image
;
572 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
573 * @pdev: PCI device associated with the host.
574 * @buf: Buffer to get the VPD data.
575 * @count: Size of buffer (maximum bytes that can be read).
577 * Return: size of VPD on success, -errno on failure
579 static ssize_t
ocxlflash_read_adapter_vpd(struct pci_dev
*pdev
, void *buf
,
582 return pci_read_vpd(pdev
, 0, count
, buf
);
586 * free_afu_irqs() - internal service to free interrupts
587 * @ctx: Adapter context.
589 static void free_afu_irqs(struct ocxlflash_context
*ctx
)
591 struct ocxl_hw_afu
*afu
= ctx
->hw_afu
;
592 struct device
*dev
= afu
->dev
;
596 dev_err(dev
, "%s: Interrupts not allocated\n", __func__
);
600 for (i
= ctx
->num_irqs
; i
>= 0; i
--)
601 ocxl_link_free_irq(afu
->link_token
, ctx
->irqs
[i
].hwirq
);
608 * alloc_afu_irqs() - internal service to allocate interrupts
609 * @ctx: Context associated with the request.
610 * @num: Number of interrupts requested.
612 * Return: 0 on success, -errno on failure
614 static int alloc_afu_irqs(struct ocxlflash_context
*ctx
, int num
)
616 struct ocxl_hw_afu
*afu
= ctx
->hw_afu
;
617 struct device
*dev
= afu
->dev
;
618 struct ocxlflash_irqs
*irqs
;
624 dev_err(dev
, "%s: Interrupts already allocated\n", __func__
);
629 if (num
> OCXL_MAX_IRQS
) {
630 dev_err(dev
, "%s: Too many interrupts num=%d\n", __func__
, num
);
635 irqs
= kcalloc(num
, sizeof(*irqs
), GFP_KERNEL
);
636 if (unlikely(!irqs
)) {
637 dev_err(dev
, "%s: Context irqs allocation failed\n", __func__
);
642 for (i
= 0; i
< num
; i
++) {
643 rc
= ocxl_link_irq_alloc(afu
->link_token
, &hwirq
);
645 dev_err(dev
, "%s: ocxl_link_irq_alloc failed rc=%d\n",
650 irqs
[i
].hwirq
= hwirq
;
658 for (i
= i
-1; i
>= 0; i
--)
659 ocxl_link_free_irq(afu
->link_token
, irqs
[i
].hwirq
);
/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie: Context associated with the request.
 * @num: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	/* Forward to the internal allocator */
	return alloc_afu_irqs(ctx_cookie, num);
}
/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie: Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	/* Forward to the internal helper */
	free_afu_irqs(ctx_cookie);
}
686 * ocxlflash_unconfig_afu() - unconfigure the AFU
687 * @afu: AFU associated with the host.
689 static void ocxlflash_unconfig_afu(struct ocxl_hw_afu
*afu
)
691 if (afu
->gmmio_virt
) {
692 iounmap(afu
->gmmio_virt
);
693 afu
->gmmio_virt
= NULL
;
698 * ocxlflash_destroy_afu() - destroy the AFU structure
699 * @afu_cookie: AFU to be freed.
701 static void ocxlflash_destroy_afu(void *afu_cookie
)
703 struct ocxl_hw_afu
*afu
= afu_cookie
;
709 ocxlflash_release_context(afu
->ocxl_ctx
);
710 idr_destroy(&afu
->idr
);
712 /* Disable the AFU */
713 pos
= afu
->acfg
.dvsec_afu_control_pos
;
714 ocxl_config_set_afu_state(afu
->pdev
, pos
, 0);
716 ocxlflash_unconfig_afu(afu
);
721 * ocxlflash_config_fn() - configure the host function
722 * @pdev: PCI device associated with the host.
723 * @afu: AFU associated with the host.
725 * Return: 0 on success, -errno on failure
727 static int ocxlflash_config_fn(struct pci_dev
*pdev
, struct ocxl_hw_afu
*afu
)
729 struct ocxl_fn_config
*fcfg
= &afu
->fcfg
;
730 struct device
*dev
= &pdev
->dev
;
731 u16 base
, enabled
, supported
;
734 /* Read DVSEC config of the function */
735 rc
= ocxl_config_read_function(pdev
, fcfg
);
737 dev_err(dev
, "%s: ocxl_config_read_function failed rc=%d\n",
742 /* Check if function has AFUs defined, only 1 per function supported */
743 if (fcfg
->max_afu_index
>= 0) {
744 afu
->is_present
= true;
745 if (fcfg
->max_afu_index
!= 0)
746 dev_warn(dev
, "%s: Unexpected AFU index value %d\n",
747 __func__
, fcfg
->max_afu_index
);
750 rc
= ocxl_config_get_actag_info(pdev
, &base
, &enabled
, &supported
);
752 dev_err(dev
, "%s: ocxl_config_get_actag_info failed rc=%d\n",
757 afu
->fn_actag_base
= base
;
758 afu
->fn_actag_enabled
= enabled
;
760 ocxl_config_set_actag(pdev
, fcfg
->dvsec_function_pos
, base
, enabled
);
761 dev_dbg(dev
, "%s: Function acTag range base=%u enabled=%u\n",
762 __func__
, base
, enabled
);
764 rc
= ocxl_link_setup(pdev
, 0, &afu
->link_token
);
766 dev_err(dev
, "%s: ocxl_link_setup failed rc=%d\n",
771 rc
= ocxl_config_set_TL(pdev
, fcfg
->dvsec_tl_pos
);
773 dev_err(dev
, "%s: ocxl_config_set_TL failed rc=%d\n",
780 ocxl_link_release(pdev
, afu
->link_token
);
785 * ocxlflash_unconfig_fn() - unconfigure the host function
786 * @pdev: PCI device associated with the host.
787 * @afu: AFU associated with the host.
789 static void ocxlflash_unconfig_fn(struct pci_dev
*pdev
, struct ocxl_hw_afu
*afu
)
791 ocxl_link_release(pdev
, afu
->link_token
);
795 * ocxlflash_map_mmio() - map the AFU MMIO space
796 * @afu: AFU associated with the host.
798 * Return: 0 on success, -errno on failure
800 static int ocxlflash_map_mmio(struct ocxl_hw_afu
*afu
)
802 struct ocxl_afu_config
*acfg
= &afu
->acfg
;
803 struct pci_dev
*pdev
= afu
->pdev
;
804 struct device
*dev
= afu
->dev
;
805 phys_addr_t gmmio
, ppmmio
;
808 rc
= pci_request_region(pdev
, acfg
->global_mmio_bar
, "ocxlflash");
810 dev_err(dev
, "%s: pci_request_region for global failed rc=%d\n",
814 gmmio
= pci_resource_start(pdev
, acfg
->global_mmio_bar
);
815 gmmio
+= acfg
->global_mmio_offset
;
817 rc
= pci_request_region(pdev
, acfg
->pp_mmio_bar
, "ocxlflash");
819 dev_err(dev
, "%s: pci_request_region for pp bar failed rc=%d\n",
823 ppmmio
= pci_resource_start(pdev
, acfg
->pp_mmio_bar
);
824 ppmmio
+= acfg
->pp_mmio_offset
;
826 afu
->gmmio_virt
= ioremap(gmmio
, acfg
->global_mmio_size
);
827 if (unlikely(!afu
->gmmio_virt
)) {
828 dev_err(dev
, "%s: MMIO mapping failed\n", __func__
);
833 afu
->gmmio_phys
= gmmio
;
834 afu
->ppmmio_phys
= ppmmio
;
838 pci_release_region(pdev
, acfg
->pp_mmio_bar
);
840 pci_release_region(pdev
, acfg
->global_mmio_bar
);
845 * ocxlflash_config_afu() - configure the host AFU
846 * @pdev: PCI device associated with the host.
847 * @afu: AFU associated with the host.
849 * Must be called _after_ host function configuration.
851 * Return: 0 on success, -errno on failure
853 static int ocxlflash_config_afu(struct pci_dev
*pdev
, struct ocxl_hw_afu
*afu
)
855 struct ocxl_afu_config
*acfg
= &afu
->acfg
;
856 struct ocxl_fn_config
*fcfg
= &afu
->fcfg
;
857 struct device
*dev
= &pdev
->dev
;
863 /* This HW AFU function does not have any AFUs defined */
864 if (!afu
->is_present
)
867 /* Read AFU config at index 0 */
868 rc
= ocxl_config_read_afu(pdev
, fcfg
, acfg
, 0);
870 dev_err(dev
, "%s: ocxl_config_read_afu failed rc=%d\n",
875 /* Only one AFU per function is supported, so actag_base is same */
876 base
= afu
->fn_actag_base
;
877 count
= min_t(int, acfg
->actag_supported
, afu
->fn_actag_enabled
);
878 pos
= acfg
->dvsec_afu_control_pos
;
880 ocxl_config_set_afu_actag(pdev
, pos
, base
, count
);
881 dev_dbg(dev
, "%s: acTag base=%d enabled=%d\n", __func__
, base
, count
);
882 afu
->afu_actag_base
= base
;
883 afu
->afu_actag_enabled
= count
;
884 afu
->max_pasid
= 1 << acfg
->pasid_supported_log
;
886 ocxl_config_set_afu_pasid(pdev
, pos
, 0, acfg
->pasid_supported_log
);
888 rc
= ocxlflash_map_mmio(afu
);
890 dev_err(dev
, "%s: ocxlflash_map_mmio failed rc=%d\n",
896 ocxl_config_set_afu_state(pdev
, acfg
->dvsec_afu_control_pos
, 1);
902 * ocxlflash_create_afu() - create the AFU for OCXL
903 * @pdev: PCI device associated with the host.
905 * Return: AFU on success, NULL on failure
907 static void *ocxlflash_create_afu(struct pci_dev
*pdev
)
909 struct device
*dev
= &pdev
->dev
;
910 struct ocxlflash_context
*ctx
;
911 struct ocxl_hw_afu
*afu
;
914 afu
= kzalloc(sizeof(*afu
), GFP_KERNEL
);
915 if (unlikely(!afu
)) {
916 dev_err(dev
, "%s: HW AFU allocation failed\n", __func__
);
924 rc
= ocxlflash_config_fn(pdev
, afu
);
926 dev_err(dev
, "%s: Function configuration failed rc=%d\n",
931 rc
= ocxlflash_config_afu(pdev
, afu
);
933 dev_err(dev
, "%s: AFU configuration failed rc=%d\n",
938 ctx
= ocxlflash_dev_context_init(pdev
, afu
);
941 dev_err(dev
, "%s: ocxlflash_dev_context_init failed rc=%d\n",
950 ocxlflash_unconfig_afu(afu
);
952 ocxlflash_unconfig_fn(pdev
, afu
);
954 idr_destroy(&afu
->idr
);
961 * ctx_event_pending() - check for any event pending on the context
962 * @ctx: Context to be checked.
964 * Return: true if there is an event pending, false if none pending
966 static inline bool ctx_event_pending(struct ocxlflash_context
*ctx
)
968 if (ctx
->pending_irq
|| ctx
->pending_fault
)
975 * afu_poll() - poll the AFU for events on the context
976 * @file: File associated with the adapter context.
977 * @poll: Poll structure from the user.
981 static unsigned int afu_poll(struct file
*file
, struct poll_table_struct
*poll
)
983 struct ocxlflash_context
*ctx
= file
->private_data
;
984 struct device
*dev
= ctx
->hw_afu
->dev
;
988 poll_wait(file
, &ctx
->wq
, poll
);
990 spin_lock_irqsave(&ctx
->slock
, lock_flags
);
991 if (ctx_event_pending(ctx
))
992 mask
|= POLLIN
| POLLRDNORM
;
993 else if (ctx
->state
== CLOSED
)
995 spin_unlock_irqrestore(&ctx
->slock
, lock_flags
);
997 dev_dbg(dev
, "%s: Poll wait completed for pe %i mask %i\n",
998 __func__
, ctx
->pe
, mask
);
1004 * afu_read() - perform a read on the context for any event
1005 * @file: File associated with the adapter context.
1006 * @buf: Buffer to receive the data.
1007 * @count: Size of buffer (maximum bytes that can be read).
1010 * Return: size of the data read on success, -errno on failure
1012 static ssize_t
afu_read(struct file
*file
, char __user
*buf
, size_t count
,
1015 struct ocxlflash_context
*ctx
= file
->private_data
;
1016 struct device
*dev
= ctx
->hw_afu
->dev
;
1017 struct cxl_event event
;
1022 DEFINE_WAIT(event_wait
);
1025 dev_err(dev
, "%s: Non-zero offset not supported, off=%lld\n",
1031 spin_lock_irqsave(&ctx
->slock
, lock_flags
);
1034 prepare_to_wait(&ctx
->wq
, &event_wait
, TASK_INTERRUPTIBLE
);
1036 if (ctx_event_pending(ctx
) || (ctx
->state
== CLOSED
))
1039 if (file
->f_flags
& O_NONBLOCK
) {
1040 dev_err(dev
, "%s: File cannot be blocked on I/O\n",
1046 if (signal_pending(current
)) {
1047 dev_err(dev
, "%s: Signal pending on the process\n",
1053 spin_unlock_irqrestore(&ctx
->slock
, lock_flags
);
1055 spin_lock_irqsave(&ctx
->slock
, lock_flags
);
1058 finish_wait(&ctx
->wq
, &event_wait
);
1060 memset(&event
, 0, sizeof(event
));
1061 event
.header
.process_element
= ctx
->pe
;
1062 event
.header
.size
= sizeof(struct cxl_event_header
);
1063 if (ctx
->pending_irq
) {
1064 esize
= sizeof(struct cxl_event_afu_interrupt
);
1065 event
.header
.size
+= esize
;
1066 event
.header
.type
= CXL_EVENT_AFU_INTERRUPT
;
1068 bit
= find_first_bit(&ctx
->irq_bitmap
, ctx
->num_irqs
);
1069 clear_bit(bit
, &ctx
->irq_bitmap
);
1070 event
.irq
.irq
= bit
+ 1;
1071 if (bitmap_empty(&ctx
->irq_bitmap
, ctx
->num_irqs
))
1072 ctx
->pending_irq
= false;
1073 } else if (ctx
->pending_fault
) {
1074 event
.header
.size
+= sizeof(struct cxl_event_data_storage
);
1075 event
.header
.type
= CXL_EVENT_DATA_STORAGE
;
1076 event
.fault
.addr
= ctx
->fault_addr
;
1077 event
.fault
.dsisr
= ctx
->fault_dsisr
;
1078 ctx
->pending_fault
= false;
1081 spin_unlock_irqrestore(&ctx
->slock
, lock_flags
);
1083 if (copy_to_user(buf
, &event
, event
.header
.size
)) {
1084 dev_err(dev
, "%s: copy_to_user failed\n", __func__
);
1089 rc
= event
.header
.size
;
1093 finish_wait(&ctx
->wq
, &event_wait
);
1094 spin_unlock_irqrestore(&ctx
->slock
, lock_flags
);
1099 * afu_release() - release and free the context
1100 * @inode: File inode pointer.
1101 * @file: File associated with the context.
1103 * Return: 0 on success, -errno on failure
1105 static int afu_release(struct inode
*inode
, struct file
*file
)
1107 struct ocxlflash_context
*ctx
= file
->private_data
;
1110 /* Unmap and free the interrupts associated with the context */
1111 for (i
= ctx
->num_irqs
; i
>= 0; i
--)
1112 afu_unmap_irq(0, ctx
, i
, ctx
);
1115 return ocxlflash_release_context(ctx
);
1119 * ocxlflash_mmap_fault() - mmap fault handler
1120 * @vmf: VM fault associated with current fault.
1122 * Return: 0 on success, -errno on failure
1124 static vm_fault_t
ocxlflash_mmap_fault(struct vm_fault
*vmf
)
1126 struct vm_area_struct
*vma
= vmf
->vma
;
1127 struct ocxlflash_context
*ctx
= vma
->vm_file
->private_data
;
1128 struct device
*dev
= ctx
->hw_afu
->dev
;
1129 u64 mmio_area
, offset
;
1131 offset
= vmf
->pgoff
<< PAGE_SHIFT
;
1132 if (offset
>= ctx
->psn_size
)
1133 return VM_FAULT_SIGBUS
;
1135 mutex_lock(&ctx
->state_mutex
);
1136 if (ctx
->state
!= STARTED
) {
1137 dev_err(dev
, "%s: Context not started, state=%d\n",
1138 __func__
, ctx
->state
);
1139 mutex_unlock(&ctx
->state_mutex
);
1140 return VM_FAULT_SIGBUS
;
1142 mutex_unlock(&ctx
->state_mutex
);
1144 mmio_area
= ctx
->psn_phys
;
1145 mmio_area
+= offset
;
1147 return vmf_insert_pfn(vma
, vmf
->address
, mmio_area
>> PAGE_SHIFT
);
1150 static const struct vm_operations_struct ocxlflash_vmops
= {
1151 .fault
= ocxlflash_mmap_fault
,
1155 * afu_mmap() - map the fault handler operations
1156 * @file: File associated with the context.
1157 * @vma: VM area associated with mapping.
1159 * Return: 0 on success, -errno on failure
1161 static int afu_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1163 struct ocxlflash_context
*ctx
= file
->private_data
;
1165 if ((vma_pages(vma
) + vma
->vm_pgoff
) >
1166 (ctx
->psn_size
>> PAGE_SHIFT
))
1169 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
;
1170 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1171 vma
->vm_ops
= &ocxlflash_vmops
;
1175 static const struct file_operations ocxl_afu_fops
= {
1176 .owner
= THIS_MODULE
,
1179 .release
= afu_release
,
1183 #define PATCH_FOPS(NAME) \
1184 do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
1187 * ocxlflash_get_fd() - get file descriptor for an adapter context
1188 * @ctx_cookie: Adapter context.
1189 * @fops: File operations to be associated.
1190 * @fd: File descriptor to be returned back.
1192 * Return: pointer to the file on success, ERR_PTR on failure
1194 static struct file
*ocxlflash_get_fd(void *ctx_cookie
,
1195 struct file_operations
*fops
, int *fd
)
1197 struct ocxlflash_context
*ctx
= ctx_cookie
;
1198 struct device
*dev
= ctx
->hw_afu
->dev
;
1204 /* Only allow one fd per context */
1206 dev_err(dev
, "%s: Context is already mapped to an fd\n",
1212 flags
= O_RDWR
| O_CLOEXEC
;
1214 /* This code is similar to anon_inode_getfd() */
1215 rc
= get_unused_fd_flags(flags
);
1216 if (unlikely(rc
< 0)) {
1217 dev_err(dev
, "%s: get_unused_fd_flags failed rc=%d\n",
1223 /* Patch the file ops that are not defined */
1227 PATCH_FOPS(release
);
1229 } else /* Use default ops */
1230 fops
= (struct file_operations
*)&ocxl_afu_fops
;
1232 name
= kasprintf(GFP_KERNEL
, "ocxlflash:%d", ctx
->pe
);
1233 file
= ocxlflash_getfile(dev
, name
, fops
, ctx
, flags
);
1237 dev_err(dev
, "%s: ocxlflash_getfile failed rc=%d\n",
1242 ctx
->mapping
= file
->f_mapping
;
1247 put_unused_fd(fdtmp
);
1254 * ocxlflash_fops_get_context() - get the context associated with the file
1255 * @file: File associated with the adapter context.
1257 * Return: pointer to the context
1259 static void *ocxlflash_fops_get_context(struct file
*file
)
1261 return file
->private_data
;
1265 * ocxlflash_afu_irq() - interrupt handler for user contexts
1266 * @irq: Interrupt number.
1267 * @data: Private data provided at interrupt registration, the context.
1269 * Return: Always return IRQ_HANDLED.
1271 static irqreturn_t
ocxlflash_afu_irq(int irq
, void *data
)
1273 struct ocxlflash_context
*ctx
= data
;
1274 struct device
*dev
= ctx
->hw_afu
->dev
;
1277 dev_dbg(dev
, "%s: Interrupt raised for pe %i virq %i\n",
1278 __func__
, ctx
->pe
, irq
);
1280 for (i
= 0; i
< ctx
->num_irqs
; i
++) {
1281 if (ctx
->irqs
[i
].virq
== irq
)
1284 if (unlikely(i
>= ctx
->num_irqs
)) {
1285 dev_err(dev
, "%s: Received AFU IRQ out of range\n", __func__
);
1289 spin_lock(&ctx
->slock
);
1290 set_bit(i
- 1, &ctx
->irq_bitmap
);
1291 ctx
->pending_irq
= true;
1292 spin_unlock(&ctx
->slock
);
1294 wake_up_all(&ctx
->wq
);
1300 * ocxlflash_start_work() - start a user context
1301 * @ctx_cookie: Context to be started.
1302 * @num_irqs: Number of interrupts requested.
1304 * Return: 0 on success, -errno on failure
1306 static int ocxlflash_start_work(void *ctx_cookie
, u64 num_irqs
)
1308 struct ocxlflash_context
*ctx
= ctx_cookie
;
1309 struct ocxl_hw_afu
*afu
= ctx
->hw_afu
;
1310 struct device
*dev
= afu
->dev
;
1315 rc
= alloc_afu_irqs(ctx
, num_irqs
);
1316 if (unlikely(rc
< 0)) {
1317 dev_err(dev
, "%s: alloc_afu_irqs failed rc=%d\n", __func__
, rc
);
1321 for (i
= 0; i
< num_irqs
; i
++) {
1322 name
= kasprintf(GFP_KERNEL
, "ocxlflash-%s-pe%i-%i",
1323 dev_name(dev
), ctx
->pe
, i
);
1324 rc
= afu_map_irq(0, ctx
, i
, ocxlflash_afu_irq
, ctx
, name
);
1326 if (unlikely(rc
< 0)) {
1327 dev_err(dev
, "%s: afu_map_irq failed rc=%d\n",
1333 rc
= start_context(ctx
);
1335 dev_err(dev
, "%s: start_context failed rc=%d\n", __func__
, rc
);
1341 for (i
= i
-1; i
>= 0; i
--)
1342 afu_unmap_irq(0, ctx
, i
, ctx
);
/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file: File installed with adapter file descriptor.
 * @vma: VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Delegate to the common AFU mmap path */
	return afu_mmap(file, vma);
}
/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode: File inode pointer.
 * @file: File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	/* Delegate to the common AFU release path */
	return afu_release(inode, file);
}
1371 /* Backend ops to ocxlflash services */
1372 const struct cxlflash_backend_ops cxlflash_ocxl_ops
= {
1373 .module
= THIS_MODULE
,
1374 .psa_map
= ocxlflash_psa_map
,
1375 .psa_unmap
= ocxlflash_psa_unmap
,
1376 .process_element
= ocxlflash_process_element
,
1377 .map_afu_irq
= ocxlflash_map_afu_irq
,
1378 .unmap_afu_irq
= ocxlflash_unmap_afu_irq
,
1379 .get_irq_objhndl
= ocxlflash_get_irq_objhndl
,
1380 .start_context
= ocxlflash_start_context
,
1381 .stop_context
= ocxlflash_stop_context
,
1382 .afu_reset
= ocxlflash_afu_reset
,
1383 .set_master
= ocxlflash_set_master
,
1384 .get_context
= ocxlflash_get_context
,
1385 .dev_context_init
= ocxlflash_dev_context_init
,
1386 .release_context
= ocxlflash_release_context
,
1387 .perst_reloads_same_image
= ocxlflash_perst_reloads_same_image
,
1388 .read_adapter_vpd
= ocxlflash_read_adapter_vpd
,
1389 .allocate_afu_irqs
= ocxlflash_allocate_afu_irqs
,
1390 .free_afu_irqs
= ocxlflash_free_afu_irqs
,
1391 .create_afu
= ocxlflash_create_afu
,
1392 .destroy_afu
= ocxlflash_destroy_afu
,
1393 .get_fd
= ocxlflash_get_fd
,
1394 .fops_get_context
= ocxlflash_fops_get_context
,
1395 .start_work
= ocxlflash_start_work
,
1396 .fd_mmap
= ocxlflash_fd_mmap
,
1397 .fd_release
= ocxlflash_fd_release
,