// SPDX-License-Identifier: GPL-2.0 OR MIT

/******************************************************************************
 * Mmap of hypercall buffers.
 *
 * Copyright (c) 2018 Juergen Gross
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");
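/*
 * This device lets user space mmap() kernel-allocated, zeroed pages that can
 * then be used as hypercall buffers. A minimal usage sketch (illustrative
 * only, error handling omitted; the node name follows from xen_privcmdbuf_dev
 * at the bottom of this file):
 *
 *	int fd = open("/dev/xen/hypercall", O_RDWR | O_CLOEXEC);
 *	long pgsz = sysconf(_SC_PAGESIZE);
 *	void *buf = mmap(NULL, pgsz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The mapping must be MAP_SHARED; privcmd_buf_mmap() below rejects anything
 * else with -EINVAL.
 */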
struct privcmd_buf_private {
	struct mutex lock;
	struct list_head list;
};
struct privcmd_buf_vma_private {
	struct privcmd_buf_private *file_priv;
	struct list_head list;
	unsigned int users;
	unsigned int n_pages;
	struct page *pages[];
};
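/* Allocate the per-file-descriptor state when the device is opened. */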
static int privcmd_buf_open(struct inode *ino, struct file *file)
{
	struct privcmd_buf_private *file_priv;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	mutex_init(&file_priv->lock);
	INIT_LIST_HEAD(&file_priv->list);

	file->private_data = file_priv;

	return 0;
}
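/*
 * Free one per-VMA tracking structure: unlink it from the file's list,
 * release every page it allocated, then free the structure itself.
 * The caller must hold file_priv->lock.
 */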
static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
{
	unsigned int i;

	list_del(&vma_priv->list);

	for (i = 0; i < vma_priv->n_pages; i++)
		__free_page(vma_priv->pages[i]);

	kfree(vma_priv);
}
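/* Drop any buffers still tracked for this file descriptor on final close. */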
static int privcmd_buf_release(struct inode *ino, struct file *file)
{
	struct privcmd_buf_private *file_priv = file->private_data;
	struct privcmd_buf_vma_private *vma_priv;

	mutex_lock(&file_priv->lock);

	while (!list_empty(&file_priv->list)) {
		vma_priv = list_first_entry(&file_priv->list,
					    struct privcmd_buf_vma_private,
					    list);
		privcmd_buf_vmapriv_free(vma_priv);
	}

	mutex_unlock(&file_priv->lock);

	kfree(file_priv);

	return 0;
}
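/* A VMA is being duplicated (e.g. on fork()): take another reference. */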
static void privcmd_buf_vma_open(struct vm_area_struct *vma)
{
	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;

	if (!vma_priv)
		return;

	mutex_lock(&vma_priv->file_priv->lock);
	vma_priv->users++;
	mutex_unlock(&vma_priv->file_priv->lock);
}
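/* Drop a reference; free the tracking structure once the last user is gone. */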
static void privcmd_buf_vma_close(struct vm_area_struct *vma)
{
	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
	struct privcmd_buf_private *file_priv;

	if (!vma_priv)
		return;

	file_priv = vma_priv->file_priv;

	mutex_lock(&file_priv->lock);

	vma_priv->users--;
	if (!vma_priv->users)
		privcmd_buf_vmapriv_free(vma_priv);

	mutex_unlock(&file_priv->lock);
}
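/*
 * All pages are inserted into the VMA at mmap time, so any fault that still
 * reaches this handler is unexpected; log it and deliver SIGBUS.
 */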
static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
{
	pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
		 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
		 vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct privcmd_buf_vm_ops = {
	.open = privcmd_buf_vma_open,
	.close = privcmd_buf_vma_close,
	.fault = privcmd_buf_vma_fault,
};
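/*
 * Back a MAP_SHARED mapping with freshly allocated, zeroed kernel pages and
 * insert them into the VMA up front. The pages stay allocated until the last
 * user of the VMA goes away (see privcmd_buf_vma_close()) or the file
 * descriptor is released.
 */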
static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct privcmd_buf_private *file_priv = file->private_data;
	struct privcmd_buf_vma_private *vma_priv;
	unsigned long count = vma_pages(vma);
	unsigned int i;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
			   GFP_KERNEL);
	if (!vma_priv)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!vma_priv->pages[i])
			break;
		vma_priv->n_pages++;
	}

	mutex_lock(&file_priv->lock);

	vma_priv->file_priv = file_priv;
	vma_priv->users = 1;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	vma->vm_ops = &privcmd_buf_vm_ops;
	vma->vm_private_data = vma_priv;

	list_add(&vma_priv->list, &file_priv->list);

	if (vma_priv->n_pages != count)
		ret = -ENOMEM;
	else
		for (i = 0; i < vma_priv->n_pages; i++) {
			ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
					     vma_priv->pages[i]);
			if (ret)
				break;
		}

	if (ret)
		privcmd_buf_vmapriv_free(vma_priv);

	mutex_unlock(&file_priv->lock);

	return ret;
}
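/*
 * File operations for the "xen/hypercall" device node, exported so the core
 * privcmd driver (which presumably registers the misc device below) can
 * refer to them.
 */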
const struct file_operations xen_privcmdbuf_fops = {
	.owner = THIS_MODULE,
	.open = privcmd_buf_open,
	.release = privcmd_buf_release,
	.mmap = privcmd_buf_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
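/* Misc device description; shows up as /dev/xen/hypercall once registered. */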
struct miscdevice xen_privcmdbuf_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/hypercall",
	.fops = &xen_privcmdbuf_fops,
};