// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uacce.h>

static dev_t uacce_devt;
static DEFINE_XARRAY_ALLOC(uacce_xa);

static const struct class uacce_class = {
	.name = UACCE_NAME,
};

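/*
 * Each uacce device is exposed to user space as a character device. Opening
 * the char device creates a uacce_queue; mmap() maps the queue's MMIO and
 * shared-memory regions and ioctl() starts or puts the queue.
 */
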
/*
 * If the parent driver or the device disappears, the queue state is invalid
 * and ops are not usable anymore.
 */
static bool uacce_queue_is_valid(struct uacce_queue *q)
{
	return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
}

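/*
 * Queue life cycle: a queue is UACCE_Q_INIT once opened, UACCE_Q_STARTED
 * after UACCE_CMD_START_Q, and UACCE_Q_ZOMBIE once it has been put; a zombie
 * queue only waits for its file descriptor to be released.
 */
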
static int uacce_start_queue(struct uacce_queue *q)
{
	int ret;

	if (q->state != UACCE_Q_INIT)
		return -EINVAL;

	if (q->uacce->ops->start_queue) {
		ret = q->uacce->ops->start_queue(q);
		if (ret < 0)
			return ret;
	}

	q->state = UACCE_Q_STARTED;
	return 0;
}

static int uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
		uacce->ops->stop_queue(q);

	if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
	    uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;

	return 0;
}

static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	long ret = -ENXIO;

	/*
	 * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
	 * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
	 * gets called with mmap_lock held, by taking uacce->mutex instead of
	 * q->mutex. Doing this in uacce_fops_mmap() is not possible because
	 * uacce_fops_open() calls iommu_sva_bind_device(), which takes
	 * mmap_lock, while holding uacce->mutex.
	 */
	mutex_lock(&uacce->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	switch (cmd) {
	case UACCE_CMD_START_Q:
		ret = uacce_start_queue(q);
		break;
	case UACCE_CMD_PUT_Q:
		ret = uacce_put_queue(q);
		break;
	default:
		if (uacce->ops->ioctl)
			ret = uacce->ops->ioctl(q, cmd, arg);
		else
			ret = -EINVAL;
	}
out_unlock:
	mutex_unlock(&uacce->mutex);
	return ret;
}

#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				    unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);

	return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif

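/*
 * With UACCE_DEV_SVA, a queue is bound to the opening process' mm: the IOMMU
 * shares the process page tables with the device and returns a PASID that
 * identifies this address space to the hardware.
 */
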
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
	u32 pasid;
	struct iommu_sva *handle;

	if (!(uacce->flags & UACCE_DEV_SVA))
		return 0;

	handle = iommu_sva_bind_device(uacce->parent, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	q->handle = handle;
	q->pasid = pasid;
	return 0;
}

static void uacce_unbind_queue(struct uacce_queue *q)
{
	if (!q->handle)
		return;

	iommu_sva_unbind_device(q->handle);
	q->handle = NULL;
}

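/*
 * open() creates a queue: look up the uacce device by char-dev minor, bind
 * the queue to the calling process when SVA is enabled, then let the parent
 * driver allocate its per-queue resources through ops->get_queue().
 */
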
static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret;

	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	mutex_lock(&uacce->mutex);

	if (!uacce->parent) {
		ret = -EINVAL;
		goto out_with_mem;
	}

	ret = uacce_bind_queue(uacce, q);
	if (ret)
		goto out_with_mem;

	q->uacce = uacce;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, q->pasid, q);
		if (ret < 0)
			goto out_with_bond;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	q->state = UACCE_Q_INIT;
	q->mapping = filep->f_mapping;
	mutex_init(&q->mutex);
	list_add(&q->list, &uacce->queues);
	mutex_unlock(&uacce->mutex);

	return 0;

out_with_bond:
	uacce_unbind_queue(q);
out_with_mem:
	kfree(q);
	mutex_unlock(&uacce->mutex);
	return ret;
}

static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	mutex_lock(&uacce->mutex);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	list_del(&q->list);
	mutex_unlock(&uacce->mutex);
	kfree(q);

	return 0;
}

static void uacce_vma_close(struct vm_area_struct *vma)
{
	struct uacce_queue *q = vma->vm_private_data;

	if (vma->vm_pgoff < UACCE_MAX_REGION) {
		struct uacce_qfile_region *qfr = q->qfrs[vma->vm_pgoff];

		mutex_lock(&q->mutex);
		q->qfrs[vma->vm_pgoff] = NULL;
		mutex_unlock(&q->mutex);
		kfree(qfr);
	}
}

static const struct vm_operations_struct uacce_vm_ops = {
	.close = uacce_vma_close,
};

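/*
 * mmap() maps one queue file region per call: vma->vm_pgoff selects the
 * region type (UACCE_QFRT_MMIO for doorbell registers, UACCE_QFRT_DUS for
 * device/user shared memory) and the parent driver's mmap() callback
 * performs the actual mapping.
 */
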
static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q)) {
		ret = -ENXIO;
		goto out_with_lock;
	}

	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
	case UACCE_QFRT_DUS:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;
		break;

	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	q->qfrs[type] = qfr;
	mutex_unlock(&q->mutex);

	return ret;

out_with_lock:
	mutex_unlock(&q->mutex);
	kfree(qfr);
	return ret;
}

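/*
 * poll() reports EPOLLIN when the parent driver's is_q_updated() callback
 * signals that the queue has new completions to consume.
 */
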
static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
	struct uacce_queue *q = file->private_data;
	struct uacce_device *uacce = q->uacce;
	__poll_t ret = 0;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	poll_wait(file, &q->wait, wait);

	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
		ret = EPOLLIN | EPOLLRDNORM;

out_unlock:
	mutex_unlock(&q->mutex);
	return ret;
}

static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};

#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)

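/*
 * sysfs attributes exported under /sys/class/uacce/<name>/: api, flags,
 * available_instances, algorithms, region_mmio_size, region_dus_size,
 * isolate and isolate_strategy. Optional entries are hidden when the parent
 * driver does not provide the corresponding callback or region.
 */
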
static ssize_t api_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%s\n", uacce->api_ver);
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%u\n", uacce->flags);
}

static ssize_t available_instances_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	if (!uacce->ops->get_available_instances)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n",
			  uacce->ops->get_available_instances(uacce));
}

static ssize_t algorithms_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%s\n", uacce->algs);
}

static ssize_t region_mmio_size_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%lu\n",
			  uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}

static ssize_t region_dus_size_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%lu\n",
			  uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}

static ssize_t isolate_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%d\n", uacce->ops->get_isolate_state(uacce));
}

static ssize_t isolate_strategy_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);
	u32 val;

	val = uacce->ops->isolate_err_threshold_read(uacce);

	return sysfs_emit(buf, "%u\n", val);
}

static ssize_t isolate_strategy_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct uacce_device *uacce = to_uacce_device(dev);
	unsigned long val;
	int ret;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val > UACCE_MAX_ERR_THRESHOLD)
		return -EINVAL;

	ret = uacce->ops->isolate_err_threshold_write(uacce, val);
	if (ret)
		return ret;

	return count;
}

static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);
static DEVICE_ATTR_RO(isolate);
static DEVICE_ATTR_RW(isolate_strategy);

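/*
 * The attributes form one group; uacce_dev_is_visible() hides entries whose
 * backing callback or queue file region is not provided by the parent driver.
 */
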
static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	&dev_attr_isolate.attr,
	&dev_attr_isolate_strategy.attr,
	NULL,
};

static umode_t uacce_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct uacce_device *uacce = to_uacce_device(dev);

	if (((attr == &dev_attr_region_mmio_size.attr) &&
	     (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
	    ((attr == &dev_attr_region_dus_size.attr) &&
	     (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
		return 0;

	if (attr == &dev_attr_isolate_strategy.attr &&
	    (!uacce->ops->isolate_err_threshold_read &&
	     !uacce->ops->isolate_err_threshold_write))
		return 0;

	if (attr == &dev_attr_isolate.attr && !uacce->ops->get_isolate_state)
		return 0;

	return attr->mode;
}

static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);

static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}

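/*
 * SVA is negotiated with the IOMMU: if enabling the IOPF or SVA feature on
 * the parent device fails, UACCE_DEV_SVA is cleared from the flags and the
 * device falls back to non-SVA operation, so callers must check the
 * returned flags.
 */
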
static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
{
	int ret;

	if (!(flags & UACCE_DEV_SVA))
		return flags;

	flags &= ~UACCE_DEV_SVA;

	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
	if (ret) {
		dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret));
		return flags;
	}

	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
	if (ret) {
		dev_err(parent, "failed to enable SVA feature! ret = %pe\n", ERR_PTR(ret));
		iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
		return flags;
	}

	return flags | UACCE_DEV_SVA;
}

static void uacce_disable_sva(struct uacce_device *uacce)
{
	if (!(uacce->flags & UACCE_DEV_SVA))
		return;

	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
}

/**
 * uacce_alloc() - alloc an accelerator
 * @parent: pointer of uacce parent device
 * @interface: pointer of uacce_interface for register
 *
 * Return: a uacce pointer on success, or an ERR_PTR() on failure. The caller
 * must check the negotiated uacce->flags, since UACCE_DEV_SVA may have been
 * cleared.
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	flags = uacce_enable_sva(parent, flags);

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->queues);
	mutex_init(&uacce->mutex);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = &uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	uacce_disable_sva(uacce);
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);

/**
 * uacce_register() - add the accelerator to cdev and export to user space
 * @uacce: The initialized uacce device
 *
 * Return: 0 if register succeeded, or an error.
 */
int uacce_register(struct uacce_device *uacce)
{
	if (!uacce)
		return -ENODEV;

	uacce->cdev = cdev_alloc();
	if (!uacce->cdev)
		return -ENOMEM;

	uacce->cdev->ops = &uacce_fops;
	uacce->cdev->owner = THIS_MODULE;

	return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);

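/*
 * Typical usage by a parent driver (illustrative sketch only; "foo_ops",
 * "foo_pdev" and the version/algorithm strings are placeholders, not part
 * of this file):
 *
 *	struct uacce_interface interface = {
 *		.name	= "foo",
 *		.flags	= UACCE_DEV_SVA,
 *		.ops	= &foo_ops,
 *	};
 *	struct uacce_device *uacce;
 *
 *	uacce = uacce_alloc(&foo_pdev->dev, &interface);
 *	if (IS_ERR(uacce))
 *		return PTR_ERR(uacce);
 *
 *	uacce->api_ver = "foo_api_v1";
 *	uacce->algs = "foo_alg\n";
 *	ret = uacce_register(uacce);
 *	if (ret)
 *		uacce_remove(uacce);
 *
 * uacce_remove() is also called from the parent driver's remove path.
 */
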
/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_queue *q, *next_q;

	if (!uacce)
		return;

	/*
	 * uacce_fops_open() may be running concurrently, even after we remove
	 * the cdev. Holding uacce->mutex ensures that open() does not obtain a
	 * removed uacce device.
	 */
	mutex_lock(&uacce->mutex);
	/* ensure no open queue remains */
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		/*
		 * Taking q->mutex ensures that fops do not use the defunct
		 * uacce->ops after the queue is disabled.
		 */
		mutex_lock(&q->mutex);
		uacce_put_queue(q);
		mutex_unlock(&q->mutex);
		uacce_unbind_queue(q);

		/*
		 * Unmap any remaining user-space mapping so the mmapped area
		 * cannot be accessed after the parent device is removed.
		 */
		unmap_mapping_range(q->mapping, 0, 0, 1);
	}

	/* disable sva now since no opened queues */
	uacce_disable_sva(uacce);

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);
	/*
	 * uacce exists as long as there are open fds, but ops will be freed
	 * now. Ensure that bugs cause NULL deref rather than use-after-free.
	 */
	uacce->ops = NULL;
	uacce->parent = NULL;
	mutex_unlock(&uacce->mutex);
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);

static int __init uacce_init(void)
{
	int ret;

	ret = class_register(&uacce_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		class_unregister(&uacce_class);

	return ret;
}

static __exit void uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_unregister(&uacce_class);
}

subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("HiSilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");