1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright 2017 IBM Corp.
4 #include <linux/poll.h>
5 #include <linux/sched/signal.h>
6 #include <linux/eventfd.h>
7 #include <linux/uaccess.h>
8 #include <uapi/misc/ocxl.h>
10 #include <asm/switch_to.h>
11 #include "ocxl_internal.h"
14 #define OCXL_NUM_MINORS 256 /* Total to reserve */
16 static dev_t ocxl_dev
;
17 static DEFINE_MUTEX(minors_idr_lock
);
18 static struct idr minors_idr
;
20 static struct ocxl_file_info
*find_and_get_file_info(dev_t devno
)
22 struct ocxl_file_info
*info
;
24 mutex_lock(&minors_idr_lock
);
25 info
= idr_find(&minors_idr
, MINOR(devno
));
27 get_device(&info
->dev
);
28 mutex_unlock(&minors_idr_lock
);
32 static int allocate_minor(struct ocxl_file_info
*info
)
36 mutex_lock(&minors_idr_lock
);
37 minor
= idr_alloc(&minors_idr
, info
, 0, OCXL_NUM_MINORS
, GFP_KERNEL
);
38 mutex_unlock(&minors_idr_lock
);
42 static void free_minor(struct ocxl_file_info
*info
)
44 mutex_lock(&minors_idr_lock
);
45 idr_remove(&minors_idr
, MINOR(info
->dev
.devt
));
46 mutex_unlock(&minors_idr_lock
);
49 static int afu_open(struct inode
*inode
, struct file
*file
)
51 struct ocxl_file_info
*info
;
52 struct ocxl_context
*ctx
;
55 pr_debug("%s for device %x\n", __func__
, inode
->i_rdev
);
57 info
= find_and_get_file_info(inode
->i_rdev
);
61 rc
= ocxl_context_alloc(&ctx
, info
->afu
, inode
->i_mapping
);
63 put_device(&info
->dev
);
66 put_device(&info
->dev
);
67 file
->private_data
= ctx
;
71 static long afu_ioctl_attach(struct ocxl_context
*ctx
,
72 struct ocxl_ioctl_attach __user
*uarg
)
74 struct ocxl_ioctl_attach arg
;
77 pr_debug("%s for context %d\n", __func__
, ctx
->pasid
);
79 if (copy_from_user(&arg
, uarg
, sizeof(arg
)))
82 /* Make sure reserved fields are not set for forward compatibility */
83 if (arg
.reserved1
|| arg
.reserved2
|| arg
.reserved3
)
86 amr
= arg
.amr
& mfspr(SPRN_UAMOR
);
87 return ocxl_context_attach(ctx
, amr
, current
->mm
);
90 static long afu_ioctl_get_metadata(struct ocxl_context
*ctx
,
91 struct ocxl_ioctl_metadata __user
*uarg
)
93 struct ocxl_ioctl_metadata arg
;
95 memset(&arg
, 0, sizeof(arg
));
99 arg
.afu_version_major
= ctx
->afu
->config
.version_major
;
100 arg
.afu_version_minor
= ctx
->afu
->config
.version_minor
;
101 arg
.pasid
= ctx
->pasid
;
102 arg
.pp_mmio_size
= ctx
->afu
->config
.pp_mmio_stride
;
103 arg
.global_mmio_size
= ctx
->afu
->config
.global_mmio_size
;
105 if (copy_to_user(uarg
, &arg
, sizeof(arg
)))
112 static long afu_ioctl_enable_p9_wait(struct ocxl_context
*ctx
,
113 struct ocxl_ioctl_p9_wait __user
*uarg
)
115 struct ocxl_ioctl_p9_wait arg
;
117 memset(&arg
, 0, sizeof(arg
));
119 if (cpu_has_feature(CPU_FTR_P9_TIDR
)) {
120 enum ocxl_context_status status
;
122 // Locks both status & tidr
123 mutex_lock(&ctx
->status_mutex
);
125 if (set_thread_tidr(current
)) {
126 mutex_unlock(&ctx
->status_mutex
);
130 ctx
->tidr
= current
->thread
.tidr
;
133 status
= ctx
->status
;
134 mutex_unlock(&ctx
->status_mutex
);
136 if (status
== ATTACHED
) {
137 int rc
= ocxl_link_update_pe(ctx
->afu
->fn
->link
,
138 ctx
->pasid
, ctx
->tidr
);
144 arg
.thread_id
= ctx
->tidr
;
148 if (copy_to_user(uarg
, &arg
, sizeof(arg
)))
156 static long afu_ioctl_get_features(struct ocxl_context
*ctx
,
157 struct ocxl_ioctl_features __user
*uarg
)
159 struct ocxl_ioctl_features arg
;
161 memset(&arg
, 0, sizeof(arg
));
164 if (cpu_has_feature(CPU_FTR_P9_TIDR
))
165 arg
.flags
[0] |= OCXL_IOCTL_FEATURES_FLAGS0_P9_WAIT
;
168 if (copy_to_user(uarg
, &arg
, sizeof(arg
)))
/* Map an ioctl command number to a printable name for pr_debug() */
#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" :			\
			x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" :	\
			x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" :		\
			x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" :	\
			x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" :	\
			x == OCXL_IOCTL_ENABLE_P9_WAIT ? "ENABLE_P9_WAIT" : \
			x == OCXL_IOCTL_GET_FEATURES ? "GET_FEATURES" :	\
			"UNKNOWN")
183 static irqreturn_t
irq_handler(void *private)
185 struct eventfd_ctx
*ev_ctx
= private;
187 eventfd_signal(ev_ctx
);
/* Teardown callback: drop the eventfd reference held by the IRQ handler */
static void irq_free(void *private)
{
	struct eventfd_ctx *ev_ctx = private;

	eventfd_ctx_put(ev_ctx);
}
198 static long afu_ioctl(struct file
*file
, unsigned int cmd
,
201 struct ocxl_context
*ctx
= file
->private_data
;
202 struct ocxl_ioctl_irq_fd irq_fd
;
203 struct eventfd_ctx
*ev_ctx
;
209 pr_debug("%s for context %d, command %s\n", __func__
, ctx
->pasid
,
212 mutex_lock(&ctx
->status_mutex
);
213 closed
= (ctx
->status
== CLOSED
);
214 mutex_unlock(&ctx
->status_mutex
);
220 case OCXL_IOCTL_ATTACH
:
221 rc
= afu_ioctl_attach(ctx
,
222 (struct ocxl_ioctl_attach __user
*) args
);
225 case OCXL_IOCTL_IRQ_ALLOC
:
226 rc
= ocxl_afu_irq_alloc(ctx
, &irq_id
);
228 irq_offset
= ocxl_irq_id_to_offset(ctx
, irq_id
);
229 rc
= copy_to_user((u64 __user
*) args
, &irq_offset
,
232 ocxl_afu_irq_free(ctx
, irq_id
);
238 case OCXL_IOCTL_IRQ_FREE
:
239 rc
= copy_from_user(&irq_offset
, (u64 __user
*) args
,
243 irq_id
= ocxl_irq_offset_to_id(ctx
, irq_offset
);
244 rc
= ocxl_afu_irq_free(ctx
, irq_id
);
247 case OCXL_IOCTL_IRQ_SET_FD
:
248 rc
= copy_from_user(&irq_fd
, (u64 __user
*) args
,
254 irq_id
= ocxl_irq_offset_to_id(ctx
, irq_fd
.irq_offset
);
255 ev_ctx
= eventfd_ctx_fdget(irq_fd
.eventfd
);
257 return PTR_ERR(ev_ctx
);
258 rc
= ocxl_irq_set_handler(ctx
, irq_id
, irq_handler
, irq_free
, ev_ctx
);
260 eventfd_ctx_put(ev_ctx
);
263 case OCXL_IOCTL_GET_METADATA
:
264 rc
= afu_ioctl_get_metadata(ctx
,
265 (struct ocxl_ioctl_metadata __user
*) args
);
269 case OCXL_IOCTL_ENABLE_P9_WAIT
:
270 rc
= afu_ioctl_enable_p9_wait(ctx
,
271 (struct ocxl_ioctl_p9_wait __user
*) args
);
275 case OCXL_IOCTL_GET_FEATURES
:
276 rc
= afu_ioctl_get_features(ctx
,
277 (struct ocxl_ioctl_features __user
*) args
);
/* 32-bit compat ioctl: the ABI is identical, so just forward */
static long afu_compat_ioctl(struct file *file, unsigned int cmd,
			unsigned long args)
{
	return afu_ioctl(file, cmd, args);
}
292 static int afu_mmap(struct file
*file
, struct vm_area_struct
*vma
)
294 struct ocxl_context
*ctx
= file
->private_data
;
296 pr_debug("%s for context %d\n", __func__
, ctx
->pasid
);
297 return ocxl_context_mmap(ctx
, vma
);
300 static bool has_xsl_error(struct ocxl_context
*ctx
)
304 mutex_lock(&ctx
->xsl_error_lock
);
305 ret
= !!ctx
->xsl_error
.addr
;
306 mutex_unlock(&ctx
->xsl_error_lock
);
312 * Are there any events pending on the AFU
313 * ctx: The AFU context
314 * Returns: true if there are events pending
316 static bool afu_events_pending(struct ocxl_context
*ctx
)
318 if (has_xsl_error(ctx
))
323 static unsigned int afu_poll(struct file
*file
, struct poll_table_struct
*wait
)
325 struct ocxl_context
*ctx
= file
->private_data
;
326 unsigned int mask
= 0;
329 pr_debug("%s for context %d\n", __func__
, ctx
->pasid
);
331 poll_wait(file
, &ctx
->events_wq
, wait
);
333 mutex_lock(&ctx
->status_mutex
);
334 closed
= (ctx
->status
== CLOSED
);
335 mutex_unlock(&ctx
->status_mutex
);
337 if (afu_events_pending(ctx
))
338 mask
= EPOLLIN
| EPOLLRDNORM
;
346 * Populate the supplied buffer with a single XSL error
347 * ctx: The AFU context to report the error from
348 * header: the event header to populate
349 * buf: The buffer to write the body into (should be at least
350 * AFU_EVENT_BODY_XSL_ERROR_SIZE)
351 * Return: the amount of buffer that was populated
353 static ssize_t
append_xsl_error(struct ocxl_context
*ctx
,
354 struct ocxl_kernel_event_header
*header
,
357 struct ocxl_kernel_event_xsl_fault_error body
;
359 memset(&body
, 0, sizeof(body
));
361 mutex_lock(&ctx
->xsl_error_lock
);
362 if (!ctx
->xsl_error
.addr
) {
363 mutex_unlock(&ctx
->xsl_error_lock
);
367 body
.addr
= ctx
->xsl_error
.addr
;
368 body
.dsisr
= ctx
->xsl_error
.dsisr
;
369 body
.count
= ctx
->xsl_error
.count
;
371 ctx
->xsl_error
.addr
= 0;
372 ctx
->xsl_error
.dsisr
= 0;
373 ctx
->xsl_error
.count
= 0;
375 mutex_unlock(&ctx
->xsl_error_lock
);
377 header
->type
= OCXL_AFU_EVENT_XSL_FAULT_ERROR
;
379 if (copy_to_user(buf
, &body
, sizeof(body
)))
385 #define AFU_EVENT_BODY_MAX_SIZE sizeof(struct ocxl_kernel_event_xsl_fault_error)
388 * Reports events on the AFU
390 * Header (struct ocxl_kernel_event_header)
391 * Body (struct ocxl_kernel_event_*)
394 static ssize_t
afu_read(struct file
*file
, char __user
*buf
, size_t count
,
397 struct ocxl_context
*ctx
= file
->private_data
;
398 struct ocxl_kernel_event_header header
;
401 DEFINE_WAIT(event_wait
);
403 memset(&header
, 0, sizeof(header
));
405 /* Require offset to be 0 */
409 if (count
< (sizeof(struct ocxl_kernel_event_header
) +
410 AFU_EVENT_BODY_MAX_SIZE
))
414 prepare_to_wait(&ctx
->events_wq
, &event_wait
,
417 if (afu_events_pending(ctx
))
420 if (ctx
->status
== CLOSED
)
423 if (file
->f_flags
& O_NONBLOCK
) {
424 finish_wait(&ctx
->events_wq
, &event_wait
);
428 if (signal_pending(current
)) {
429 finish_wait(&ctx
->events_wq
, &event_wait
);
436 finish_wait(&ctx
->events_wq
, &event_wait
);
438 if (has_xsl_error(ctx
)) {
439 used
= append_xsl_error(ctx
, &header
, buf
+ sizeof(header
));
444 if (!afu_events_pending(ctx
))
445 header
.flags
|= OCXL_KERNEL_EVENT_FLAG_LAST
;
447 if (copy_to_user(buf
, &header
, sizeof(header
)))
450 used
+= sizeof(header
);
456 static int afu_release(struct inode
*inode
, struct file
*file
)
458 struct ocxl_context
*ctx
= file
->private_data
;
461 pr_debug("%s for device %x\n", __func__
, inode
->i_rdev
);
462 rc
= ocxl_context_detach(ctx
);
463 mutex_lock(&ctx
->mapping_lock
);
465 mutex_unlock(&ctx
->mapping_lock
);
466 wake_up_all(&ctx
->events_wq
);
468 ocxl_context_free(ctx
);
472 static const struct file_operations ocxl_afu_fops
= {
473 .owner
= THIS_MODULE
,
475 .unlocked_ioctl
= afu_ioctl
,
476 .compat_ioctl
= afu_compat_ioctl
,
480 .release
= afu_release
,
483 // Free the info struct
484 static void info_release(struct device
*dev
)
486 struct ocxl_file_info
*info
= container_of(dev
, struct ocxl_file_info
, dev
);
488 ocxl_afu_put(info
->afu
);
492 static int ocxl_file_make_visible(struct ocxl_file_info
*info
)
496 cdev_init(&info
->cdev
, &ocxl_afu_fops
);
497 rc
= cdev_add(&info
->cdev
, info
->dev
.devt
, 1);
499 dev_err(&info
->dev
, "Unable to add afu char device: %d\n", rc
);
506 static void ocxl_file_make_invisible(struct ocxl_file_info
*info
)
508 cdev_del(&info
->cdev
);
511 static char *ocxl_devnode(const struct device
*dev
, umode_t
*mode
)
513 return kasprintf(GFP_KERNEL
, "ocxl/%s", dev_name(dev
));
516 static const struct class ocxl_class
= {
518 .devnode
= ocxl_devnode
,
521 int ocxl_file_register_afu(struct ocxl_afu
*afu
)
525 struct ocxl_file_info
*info
;
526 struct ocxl_fn
*fn
= afu
->fn
;
527 struct pci_dev
*pci_dev
= to_pci_dev(fn
->dev
.parent
);
529 info
= kzalloc(sizeof(*info
), GFP_KERNEL
);
533 minor
= allocate_minor(info
);
539 info
->dev
.parent
= &fn
->dev
;
540 info
->dev
.devt
= MKDEV(MAJOR(ocxl_dev
), minor
);
541 info
->dev
.class = &ocxl_class
;
542 info
->dev
.release
= info_release
;
547 rc
= dev_set_name(&info
->dev
, "%s.%s.%hhu",
548 afu
->config
.name
, dev_name(&pci_dev
->dev
), afu
->config
.idx
);
552 rc
= device_register(&info
->dev
);
555 put_device(&info
->dev
);
559 rc
= ocxl_sysfs_register_afu(info
);
563 rc
= ocxl_file_make_visible(info
);
567 ocxl_afu_set_private(afu
, info
);
572 ocxl_sysfs_unregister_afu(info
); // safe to call even if register failed
574 device_unregister(&info
->dev
);
583 void ocxl_file_unregister_afu(struct ocxl_afu
*afu
)
585 struct ocxl_file_info
*info
= ocxl_afu_get_private(afu
);
590 ocxl_file_make_invisible(info
);
591 ocxl_sysfs_unregister_afu(info
);
593 device_unregister(&info
->dev
);
596 int ocxl_file_init(void)
600 idr_init(&minors_idr
);
602 rc
= alloc_chrdev_region(&ocxl_dev
, 0, OCXL_NUM_MINORS
, "ocxl");
604 pr_err("Unable to allocate ocxl major number: %d\n", rc
);
608 rc
= class_register(&ocxl_class
);
610 pr_err("Unable to create ocxl class\n");
611 unregister_chrdev_region(ocxl_dev
, OCXL_NUM_MINORS
);
618 void ocxl_file_exit(void)
620 class_unregister(&ocxl_class
);
621 unregister_chrdev_region(ocxl_dev
, OCXL_NUM_MINORS
);
622 idr_destroy(&minors_idr
);