1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/cred.h>
3 #include <linux/device.h>
4 #include <linux/dma-buf.h>
5 #include <linux/highmem.h>
6 #include <linux/init.h>
7 #include <linux/kernel.h>
8 #include <linux/memfd.h>
9 #include <linux/miscdevice.h>
10 #include <linux/module.h>
11 #include <linux/shmem_fs.h>
12 #include <linux/slab.h>
13 #include <linux/udmabuf.h>
/* Hard cap on udmabuf_create_list->count: number of memfd regions per buffer. */
static const u32 list_limit = 1024;
/* Hard cap on the total size of one udmabuf, in megabytes. */
static const size_t size_limit_mb = 64;
23 static vm_fault_t
udmabuf_vm_fault(struct vm_fault
*vmf
)
25 struct vm_area_struct
*vma
= vmf
->vma
;
26 struct udmabuf
*ubuf
= vma
->vm_private_data
;
28 vmf
->page
= ubuf
->pages
[vmf
->pgoff
];
33 static const struct vm_operations_struct udmabuf_vm_ops
= {
34 .fault
= udmabuf_vm_fault
,
37 static int mmap_udmabuf(struct dma_buf
*buf
, struct vm_area_struct
*vma
)
39 struct udmabuf
*ubuf
= buf
->priv
;
41 if ((vma
->vm_flags
& (VM_SHARED
| VM_MAYSHARE
)) == 0)
44 vma
->vm_ops
= &udmabuf_vm_ops
;
45 vma
->vm_private_data
= ubuf
;
49 static struct sg_table
*map_udmabuf(struct dma_buf_attachment
*at
,
50 enum dma_data_direction direction
)
52 struct udmabuf
*ubuf
= at
->dmabuf
->priv
;
56 sg
= kzalloc(sizeof(*sg
), GFP_KERNEL
);
58 return ERR_PTR(-ENOMEM
);
59 ret
= sg_alloc_table_from_pages(sg
, ubuf
->pages
, ubuf
->pagecount
,
60 0, ubuf
->pagecount
<< PAGE_SHIFT
,
64 if (!dma_map_sg(at
->dev
, sg
->sgl
, sg
->nents
, direction
)) {
76 static void unmap_udmabuf(struct dma_buf_attachment
*at
,
78 enum dma_data_direction direction
)
80 dma_unmap_sg(at
->dev
, sg
->sgl
, sg
->nents
, direction
);
85 static void release_udmabuf(struct dma_buf
*buf
)
87 struct udmabuf
*ubuf
= buf
->priv
;
90 for (pg
= 0; pg
< ubuf
->pagecount
; pg
++)
91 put_page(ubuf
->pages
[pg
]);
96 static void *kmap_udmabuf(struct dma_buf
*buf
, unsigned long page_num
)
98 struct udmabuf
*ubuf
= buf
->priv
;
99 struct page
*page
= ubuf
->pages
[page_num
];
/* dma_buf_ops.unmap: release a kernel mapping obtained via kmap_udmabuf(). */
static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
			   void *vaddr)
{
	kunmap(vaddr);
}
110 static const struct dma_buf_ops udmabuf_ops
= {
111 .map_dma_buf
= map_udmabuf
,
112 .unmap_dma_buf
= unmap_udmabuf
,
113 .release
= release_udmabuf
,
115 .unmap
= kunmap_udmabuf
,
116 .mmap
= mmap_udmabuf
,
/* The memfd must be sealed against shrinking so backing pages cannot vanish. */
#define SEALS_WANTED (F_SEAL_SHRINK)
/* Write-sealed memfds are rejected: the buffer must remain writable. */
#define SEALS_DENIED (F_SEAL_WRITE)
122 static long udmabuf_create(const struct udmabuf_create_list
*head
,
123 const struct udmabuf_create_item
*list
)
125 DEFINE_DMA_BUF_EXPORT_INFO(exp_info
);
126 struct file
*memfd
= NULL
;
127 struct udmabuf
*ubuf
;
129 pgoff_t pgoff
, pgcnt
, pgidx
, pgbuf
= 0, pglimit
;
131 int seals
, ret
= -EINVAL
;
134 ubuf
= kzalloc(sizeof(*ubuf
), GFP_KERNEL
);
138 pglimit
= (size_limit_mb
* 1024 * 1024) >> PAGE_SHIFT
;
139 for (i
= 0; i
< head
->count
; i
++) {
140 if (!IS_ALIGNED(list
[i
].offset
, PAGE_SIZE
))
142 if (!IS_ALIGNED(list
[i
].size
, PAGE_SIZE
))
144 ubuf
->pagecount
+= list
[i
].size
>> PAGE_SHIFT
;
145 if (ubuf
->pagecount
> pglimit
)
148 ubuf
->pages
= kmalloc_array(ubuf
->pagecount
, sizeof(*ubuf
->pages
),
156 for (i
= 0; i
< head
->count
; i
++) {
158 memfd
= fget(list
[i
].memfd
);
161 if (!shmem_mapping(file_inode(memfd
)->i_mapping
))
163 seals
= memfd_fcntl(memfd
, F_GET_SEALS
, 0);
164 if (seals
== -EINVAL
)
167 if ((seals
& SEALS_WANTED
) != SEALS_WANTED
||
168 (seals
& SEALS_DENIED
) != 0)
170 pgoff
= list
[i
].offset
>> PAGE_SHIFT
;
171 pgcnt
= list
[i
].size
>> PAGE_SHIFT
;
172 for (pgidx
= 0; pgidx
< pgcnt
; pgidx
++) {
173 page
= shmem_read_mapping_page(
174 file_inode(memfd
)->i_mapping
, pgoff
+ pgidx
);
179 ubuf
->pages
[pgbuf
++] = page
;
185 exp_info
.ops
= &udmabuf_ops
;
186 exp_info
.size
= ubuf
->pagecount
<< PAGE_SHIFT
;
187 exp_info
.priv
= ubuf
;
188 exp_info
.flags
= O_RDWR
;
190 buf
= dma_buf_export(&exp_info
);
197 if (head
->flags
& UDMABUF_FLAGS_CLOEXEC
)
199 return dma_buf_fd(buf
, flags
);
203 put_page(ubuf
->pages
[--pgbuf
]);
211 static long udmabuf_ioctl_create(struct file
*filp
, unsigned long arg
)
213 struct udmabuf_create create
;
214 struct udmabuf_create_list head
;
215 struct udmabuf_create_item list
;
217 if (copy_from_user(&create
, (void __user
*)arg
,
221 head
.flags
= create
.flags
;
223 list
.memfd
= create
.memfd
;
224 list
.offset
= create
.offset
;
225 list
.size
= create
.size
;
227 return udmabuf_create(&head
, &list
);
230 static long udmabuf_ioctl_create_list(struct file
*filp
, unsigned long arg
)
232 struct udmabuf_create_list head
;
233 struct udmabuf_create_item
*list
;
237 if (copy_from_user(&head
, (void __user
*)arg
, sizeof(head
)))
239 if (head
.count
> list_limit
)
241 lsize
= sizeof(struct udmabuf_create_item
) * head
.count
;
242 list
= memdup_user((void __user
*)(arg
+ sizeof(head
)), lsize
);
244 return PTR_ERR(list
);
246 ret
= udmabuf_create(&head
, list
);
251 static long udmabuf_ioctl(struct file
*filp
, unsigned int ioctl
,
258 ret
= udmabuf_ioctl_create(filp
, arg
);
260 case UDMABUF_CREATE_LIST
:
261 ret
= udmabuf_ioctl_create_list(filp
, arg
);
270 static const struct file_operations udmabuf_fops
= {
271 .owner
= THIS_MODULE
,
272 .unlocked_ioctl
= udmabuf_ioctl
,
275 static struct miscdevice udmabuf_misc
= {
276 .minor
= MISC_DYNAMIC_MINOR
,
278 .fops
= &udmabuf_fops
,
281 static int __init
udmabuf_dev_init(void)
283 return misc_register(&udmabuf_misc
);
286 static void __exit
udmabuf_dev_exit(void)
288 misc_deregister(&udmabuf_misc
);
/* module_init()/module_exit() expand to full definitions; no ';' needed. */
module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");