/*
 * Linux kernel wrapper for KQEMU
 *
 * Copyright (C) 2004-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/version.h>
#include <linux/ioctl.h>
#include <linux/smp_lock.h>
#include <linux/miscdevice.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include "kqemu-kernel.h"
36 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
37 #error "Linux 2.4.19 or above needed"
40 /* The pfn_to_page() API appeared in 2.5.14 and changed to function during 2.6.x */
41 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pfn_to_page)
42 #define page_to_pfn(page) ((page) - mem_map)
43 #define pfn_to_page(pfn) (mem_map + (pfn))
46 #ifdef PAGE_KERNEL_EXEC
48 /* problem : i386 kernels usually don't export __PAGE_KERNEL_EXEC */
49 #undef PAGE_KERNEL_EXEC
50 #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL & ~_PAGE_NX)
53 #define PAGE_KERNEL_EXEC PAGE_KERNEL
63 /* if 0 is used, then devfs/udev is used to automatically create the
66 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
67 module_param(major
, int, 0);
69 MODULE_PARM(major
,"i");
72 /* Lock the page at virtual address 'user_addr' and return its
73 physical address (page index). Return a host OS private user page
74 identifier or NULL if error */
75 struct kqemu_user_page
*CDECL
kqemu_lock_user_page(unsigned long *ppage_index
,
76 unsigned long user_addr
)
81 ret
= get_user_pages(current
, current
->mm
,
84 1, /* 'write': intent to write. */
90 /* we ensure here that the page cannot be swapped out by the
92 /* XXX: This test may be incorrect for 2.6 kernels */
100 *ppage_index
= page_to_pfn(page
);
101 return (struct kqemu_user_page
*)page
;
104 void CDECL
kqemu_unlock_user_page(struct kqemu_user_page
*page1
)
106 struct page
*page
= (struct page
*)page1
;
107 set_page_dirty(page
);
114 /* Allocate a new page and return its physical address (page
115 index). Return a host OS private page identifier or NULL if
117 struct kqemu_page
*CDECL
kqemu_alloc_zeroed_page(unsigned long *ppage_index
)
122 vaddr
= get_zeroed_page(GFP_KERNEL
);
128 page
= virt_to_page(vaddr
);
129 *ppage_index
= page_to_pfn(page
);
130 return (struct kqemu_page
*)page
;
133 void CDECL
kqemu_free_page(struct kqemu_page
*page1
)
135 struct page
*page
= (struct page
*)page1
;
142 /* Return a host kernel address of the physical page whose private
143 identifier is 'page1' */
144 void * CDECL
kqemu_page_kaddr(struct kqemu_page
*page1
)
146 struct page
*page
= (struct page
*)page1
;
147 return page_address(page
);
150 /* Allocate 'size' bytes of memory in host kernel address space (size
151 is a multiple of 4 KB) and return the address or NULL if error. The
152 allocated memory must be marked as executable by the host kernel
153 and must be page aligned. On i386 with PAE (but not on x86_64), it
154 must be allocated in the first 4 GB of physical memory. */
155 void * CDECL
kqemu_vmalloc(unsigned int size
)
157 return __vmalloc(size
, GFP_KERNEL
, PAGE_KERNEL_EXEC
);
160 void CDECL
kqemu_vfree(void *ptr
)
165 /* Convert a page aligned address inside a memory area allocated by
166 kqemu_vmalloc() to a physical address (page index) */
167 unsigned long CDECL
kqemu_vmalloc_to_phys(const void *vaddr
)
170 page
= vmalloc_to_page((void *)vaddr
);
173 return page_to_pfn(page
);
176 /* Map a IO area in the kernel address space and return its
177 address. Return NULL if error or not implemented. This function is
178 only used if an APIC is detected on the host CPU. */
179 void * CDECL
kqemu_io_map(unsigned long page_index
, unsigned int size
)
181 return ioremap(page_index
<< PAGE_SHIFT
, size
);
184 /* Unmap the IO area */
185 void CDECL
kqemu_io_unmap(void *ptr
, unsigned int size
)
190 /* return TRUE if a signal is pending (i.e. the guest must stop
192 int CDECL
kqemu_schedule(void)
194 if (need_resched()) {
197 return signal_pending(current
);
202 void CDECL
kqemu_log(const char *fmt
, ...)
206 vsnprintf(log_buf
, sizeof(log_buf
), fmt
, ap
);
207 printk("kqemu: %s", log_buf
);
211 /*********************************************************/
213 static struct kqemu_global_state
*kqemu_gs
;
215 struct kqemu_instance
{
216 struct semaphore sem
;
217 struct kqemu_state
*state
;
220 static int kqemu_open(struct inode
*inode
, struct file
*filp
)
222 struct kqemu_instance
*ks
;
224 ks
= kmalloc(sizeof(struct kqemu_instance
), GFP_KERNEL
);
227 init_MUTEX(&ks
->sem
);
229 filp
->private_data
= ks
;
233 static int kqemu_release(struct inode
*inode
, struct file
*filp
)
235 struct kqemu_instance
*ks
= filp
->private_data
;
239 kqemu_delete(ks
->state
);
247 printk("lock_count=%d page_alloc_count=%d\n",
248 lock_count
, page_alloc_count
);
253 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
254 static long kqemu_ioctl(struct file
*filp
,
255 unsigned int cmd
, unsigned long arg
)
257 static int kqemu_ioctl(struct inode
*inode
, struct file
*filp
,
258 unsigned int cmd
, unsigned long arg
)
261 struct kqemu_instance
*ks
= filp
->private_data
;
262 struct kqemu_state
*s
= ks
->state
;
269 struct kqemu_init d1
, *d
= &d1
;
274 if (copy_from_user(d
, (void *)arg
, sizeof(*d
))) {
278 s
= kqemu_init(d
, kqemu_gs
);
287 case KQEMU_SET_PHYS_MEM
:
289 struct kqemu_phys_mem kphys_mem
;
295 if (copy_from_user(&kphys_mem
, (void *)arg
, sizeof(kphys_mem
))) {
299 ret
= kqemu_set_phys_mem(s
, &kphys_mem
);
307 struct kqemu_cpu_state
*ctx
;
313 ctx
= kqemu_get_cpu_state(s
);
314 if (copy_from_user(ctx
, (void *)arg
, sizeof(*ctx
))) {
318 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
322 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
325 if (copy_to_user((void *)arg
, ctx
, sizeof(*ctx
))) {
331 case KQEMU_GET_VERSION
:
333 if (put_user(KQEMU_VERSION
, (int *)arg
) < 0) {
348 static struct file_operations kqemu_fops
= {
350 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
351 compat_ioctl
: kqemu_ioctl
,
352 unlocked_ioctl
: kqemu_ioctl
,
357 release
: kqemu_release
,
360 static struct miscdevice kqemu_dev
=
362 .minor
= MISC_DYNAMIC_MINOR
,
367 int init_module(void)
369 int ret
, max_locked_pages
;
372 printk("QEMU Accelerator Module version %d.%d.%d, Copyright (c) 2005-2008 Fabrice Bellard\n",
373 (KQEMU_VERSION
>> 16),
374 (KQEMU_VERSION
>> 8) & 0xff,
375 (KQEMU_VERSION
) & 0xff);
377 max_locked_pages
= si
.totalram
/ 2;
378 kqemu_gs
= kqemu_global_init(max_locked_pages
);
383 ret
= register_chrdev(major
, "kqemu", &kqemu_fops
);
385 kqemu_global_delete(kqemu_gs
);
386 printk("kqemu: could not get major %d\n", major
);
390 ret
= misc_register (&kqemu_dev
);
392 kqemu_global_delete(kqemu_gs
);
393 printk("kqemu: could not create device\n");
397 printk("KQEMU installed, max_locked_mem=%dkB.\n",
398 max_locked_pages
* 4);
402 void cleanup_module(void)
405 unregister_chrdev(major
, "kqemu");
407 misc_deregister (&kqemu_dev
);
409 kqemu_global_delete(kqemu_gs
);
414 MODULE_LICENSE("GPL");