drivers/dma-buf/udmabuf.c (drm/drm-misc.git)

// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");
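
/*
 * Note (added commentary): since both parameters are registered with mode
 * 0644, they can also be adjusted at runtime through sysfs, e.g.:
 *
 *	echo 128 > /sys/module/udmabuf/parameters/size_limit_mb
 */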

struct udmabuf {
	pgoff_t pagecount;
	struct folio **folios;

	/*
	 * Unlike folios, pinned_folios is only used for unpinning.
	 * Hence, nr_pinned is not the same as pagecount: pinned_folios
	 * records each folio exactly as it was pinned during
	 * udmabuf_create. Note that, since a folio may be pinned
	 * multiple times, it can appear in pinned_folios multiple
	 * times, once per pin taken at creation time.
	 */
	pgoff_t nr_pinned;
	struct folio **pinned_folios;

	struct sg_table *sg;
	struct miscdevice *device;
	pgoff_t *offsets;
};
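
/*
 * Illustrative example (added commentary, assuming 4 KiB base pages): a
 * udmabuf covering one 2 MiB hugetlb folio exactly once ends up with
 * nr_pinned == 1 (a single pinned_folios entry) but pagecount == 512,
 * because folios[] and offsets[] carry one entry per PAGE_SIZE subpage.
 */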

static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;
	unsigned long addr, pfn;
	vm_fault_t ret;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;

	pfn = folio_pfn(ubuf->folios[pgoff]);
	pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;

	ret = vmf_insert_pfn(vma, vmf->address, pfn);
	if (ret & VM_FAULT_ERROR)
		return ret;

	/* pre fault */
	pgoff = vma->vm_pgoff;
	addr = vma->vm_start;

	for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) {
		if (addr == vmf->address)
			continue;

		if (WARN_ON(pgoff >= ubuf->pagecount))
			break;

		pfn = folio_pfn(ubuf->folios[pgoff]);
		pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;

		/*
		 * If the below vmf_insert_pfn() fails, we do not return an
		 * error here during this pre-fault step. However, an error
		 * will be returned if the failure occurs when the addr is
		 * truly accessed.
		 */
		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
			break;
	}

	return ret;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}
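
/*
 * Userspace sketch (added commentary, not part of this file): because the
 * handler above rejects private mappings, the exported dmabuf fd must be
 * mapped with MAP_SHARED, e.g.:
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, udmabuf_fd, 0);
 */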

static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;
	unsigned long *pfns;
	void *vaddr;
	pgoff_t pg;

	dma_resv_assert_held(buf->resv);

	/*
	 * HVO (HugeTLB Vmemmap Optimization) may free tail pages, so just
	 * use the pfn to map each folio into the vmalloc area.
	 */
	pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return -ENOMEM;

	for (pg = 0; pg < ubuf->pagecount; pg++) {
		unsigned long pfn = folio_pfn(ubuf->folios[pg]);

		pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
		pfns[pg] = pfn;
	}

	vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
	kvfree(pfns);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);
	return 0;
}
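
/*
 * In-kernel usage sketch (added commentary): an importer normally reaches
 * vmap_udmabuf() through the dma-buf API, whose unlocked variants take the
 * reservation lock asserted above on the caller's behalf:
 *
 *	struct iosys_map map;
 *
 *	if (!dma_buf_vmap_unlocked(buf, &map)) {
 *		memset(map.vaddr, 0, len);
 *		dma_buf_vunmap_unlocked(buf, &map);
 *	}
 */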

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	dma_resv_assert_held(buf->resv);

	/* Memory returned by vmap_pfn() must be released with vunmap(). */
	vunmap(map->vaddr);
}

static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	struct scatterlist *sgl;
	unsigned int i = 0;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
	if (ret < 0)
		goto err_alloc;

	for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
		sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
			     ubuf->offsets[i]);

	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err_map;
	return sg;

err_map:
	sg_free_table(sg);
err_alloc:
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	return put_sg_table(at->dev, sg, direction);
}

static void unpin_all_folios(struct udmabuf *ubuf)
{
	pgoff_t i;

	for (i = 0; i < ubuf->nr_pinned; ++i)
		unpin_folio(ubuf->pinned_folios[i]);

	kvfree(ubuf->pinned_folios);
}

static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
{
	ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);
	if (!ubuf->folios)
		return -ENOMEM;

	ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
	if (!ubuf->offsets)
		return -ENOMEM;

	ubuf->pinned_folios = kvmalloc_array(pgcnt,
					     sizeof(*ubuf->pinned_folios),
					     GFP_KERNEL);
	if (!ubuf->pinned_folios)
		return -ENOMEM;

	return 0;
}

static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
{
	unpin_all_folios(ubuf);
	kvfree(ubuf->offsets);
	kvfree(ubuf->folios);
}

static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	deinit_udmabuf(ubuf);
	kfree(ubuf);
}

static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}
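
/*
 * Userspace sketch (added commentary): begin/end_cpu_udmabuf() are driven
 * by the generic DMA_BUF_IOCTL_SYNC ioctl from <linux/dma-buf.h>, which
 * userspace brackets around CPU access to the mapping:
 *
 *	struct dma_buf_sync sync;
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(udmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	(CPU reads/writes through the mmap() happen here)
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(udmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */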

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release = release_udmabuf,
	.mmap = mmap_udmabuf,
	.vmap = vmap_udmabuf,
	.vunmap = vunmap_udmabuf,
	.begin_cpu_access = begin_cpu_udmabuf,
	.end_cpu_access = end_cpu_udmabuf,
};

#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

static int check_memfd_seals(struct file *memfd)
{
	int seals;

	if (!shmem_file(memfd) && !is_file_hugepages(memfd))
		return -EBADFD;

	seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
	if (seals == -EINVAL)
		return -EBADFD;

	if ((seals & SEALS_WANTED) != SEALS_WANTED ||
	    (seals & SEALS_DENIED) != 0)
		return -EINVAL;

	return 0;
}
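
/*
 * Userspace sketch (added commentary): a memfd that passes the checks above
 * is created roughly like this; F_SEAL_SHRINK is required and F_SEAL_WRITE
 * must stay clear:
 *
 *	int memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
 *
 *	ftruncate(memfd, size);
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 */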

static int export_udmabuf(struct udmabuf *ubuf,
			  struct miscdevice *device,
			  u32 flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *buf;

	ubuf->device = device;
	exp_info.ops = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	return dma_buf_fd(buf, flags);
}

static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
			       loff_t start, loff_t size, struct folio **folios)
{
	pgoff_t nr_pinned = ubuf->nr_pinned;
	pgoff_t upgcnt = ubuf->pagecount;
	u32 cur_folio, cur_pgcnt;
	pgoff_t pgoff, pgcnt;
	long nr_folios;
	loff_t end;

	pgcnt = size >> PAGE_SHIFT;
	end = start + (pgcnt << PAGE_SHIFT) - 1;
	nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
	if (nr_folios <= 0)
		return nr_folios ? nr_folios : -EINVAL;

	cur_pgcnt = 0;
	for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) {
		pgoff_t subpgoff = pgoff;
		size_t fsize = folio_size(folios[cur_folio]);

		ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];

		for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
			ubuf->folios[upgcnt] = folios[cur_folio];
			ubuf->offsets[upgcnt] = subpgoff;
			++upgcnt;

			if (++cur_pgcnt >= pgcnt)
				goto end;
		}

		/*
		 * In a given range, only the first folio may start at a
		 * non-zero offset within itself; that offset is returned
		 * by memfd_pin_folios(). All subsequent folios in the
		 * range start at offset 0.
		 */
		pgoff = 0;
	}
end:
	ubuf->pagecount = upgcnt;
	ubuf->nr_pinned = nr_pinned;
	return 0;
}

static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	unsigned long max_nr_folios = 0;
	struct folio **folios = NULL;
	pgoff_t pgcnt = 0, pglimit;
	struct udmabuf *ubuf;
	long ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		pgoff_t subpgcnt;

		if (!PAGE_ALIGNED(list[i].offset))
			goto err_noinit;
		if (!PAGE_ALIGNED(list[i].size))
			goto err_noinit;

		subpgcnt = list[i].size >> PAGE_SHIFT;
		pgcnt += subpgcnt;
		if (pgcnt > pglimit)
			goto err_noinit;

		max_nr_folios = max_t(unsigned long, subpgcnt, max_nr_folios);
	}

	if (!pgcnt)
		goto err_noinit;

	ret = init_udmabuf(ubuf, pgcnt);
	if (ret)
		goto err;

	folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL);
	if (!folios) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < head->count; i++) {
		struct file *memfd = fget(list[i].memfd);

		if (!memfd) {
			ret = -EBADFD;
			goto err;
		}

		ret = check_memfd_seals(memfd);
		if (ret < 0) {
			fput(memfd);
			goto err;
		}

		ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
					 list[i].size, folios);
		fput(memfd);
		if (ret)
			goto err;
	}

	flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
	ret = export_udmabuf(ubuf, device, flags);
	if (ret < 0)
		goto err;

	kvfree(folios);
	return ret;

err:
	deinit_udmabuf(ubuf);
err_noinit:
	kfree(ubuf);
	kvfree(folios);
	return ret;
}

static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags = create.flags;
	head.count = 1;
	list.memfd = create.memfd;
	list.offset = create.offset;
	list.size = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}
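
/*
 * Userspace sketch (added commentary): the single-item path above
 * corresponds to the UDMABUF_CREATE ioctl from <linux/udmabuf.h>; offset
 * and size must both be page aligned:
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,
 *		.size   = size,
 *	};
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int udmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
 */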

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "udmabuf",
	.fops = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");