/* $Id: kqemu-freebsd.c,v 1.6 2006/04/25 22:16:42 bellard Exp $ */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#if __FreeBSD_version >= 500000
#include <sys/mutex.h>
#endif
#include <sys/proc.h>
#include <sys/resourcevar.h>
#if __FreeBSD_version >= 500000
#include <sys/sched.h>
#endif
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#if __FreeBSD_version < 500000
#include <sys/buf.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/vmparam.h>
#include <machine/stdarg.h>

#include "kqemu-kernel.h"
#define KQEMU_MAJOR 250
MALLOC_DECLARE(M_KQEMU);
MALLOC_DEFINE(M_KQEMU, "kqemu", "kqemu buffers");
static int kqemu_debug;
SYSCTL_INT(_debug, OID_AUTO, kqemu_debug, CTLFLAG_RW, &kqemu_debug, 0,
           "kqemu debug flag");
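
/*
 * Usage note (illustrative): the SYSCTL_INT() above exports the flag as
 * debug.kqemu_debug, so verbose logging can be toggled at run time:
 *
 *     # sysctl debug.kqemu_debug=1
 */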
#define USER_BASE 0x1000
/* lock the page at virtual address 'user_addr' and return its
   physical page index. Return NULL if error */
struct kqemu_user_page *CDECL kqemu_lock_user_page(unsigned long *ppage_index,
                                                   unsigned long user_addr)
{
    struct vmspace *vm = curproc->p_vmspace;
    vm_offset_t va = user_addr;
    pmap_t pmap;
    vm_paddr_t pa;
    int ret;

#if __FreeBSD_version >= 500000
    ret = vm_map_wire(&vm->vm_map, va, va + PAGE_SIZE, VM_MAP_WIRE_USER);
#else
    ret = vm_map_user_pageable(&vm->vm_map, va, va + PAGE_SIZE, FALSE);
#endif
    if (ret != KERN_SUCCESS) {
        kqemu_log("kqemu_lock_user_page(%08lx) failed, ret=%d\n", user_addr, ret);
        return NULL;
    }
    pmap = vm_map_pmap(&vm->vm_map);
    pa = pmap_extract(pmap, va);
    /* kqemu_log("kqemu_lock_user_page(%08lx) va=%08x pa=%08x\n", user_addr, va, pa); */
    *ppage_index = pa >> PAGE_SHIFT;
    return (struct kqemu_user_page *)va;
}
void CDECL kqemu_unlock_user_page(struct kqemu_user_page *page)
{
    struct vmspace *vm = curproc->p_vmspace;
    vm_offset_t va;
    int ret;

    /* kqemu_log("kqemu_unlock_user_page(%p)\n", page); */
    va = (vm_offset_t)page;
#if __FreeBSD_version >= 500000
    ret = vm_map_unwire(&vm->vm_map, va, va + PAGE_SIZE, VM_MAP_WIRE_USER);
#else
    ret = vm_map_user_pageable(&vm->vm_map, va, va + PAGE_SIZE, TRUE);
#endif
    if (ret != KERN_SUCCESS) {
        kqemu_log("kqemu_unlock_user_page(%p) failed, ret=%d\n", page, ret);
    }
}
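
/*
 * Usage sketch (illustrative, not part of the driver): callers pin a
 * user page before touching it and unpin it afterwards. 'buf' is a
 * hypothetical page-aligned user-space address.
 *
 *     unsigned long page_index;
 *     struct kqemu_user_page *p;
 *
 *     p = kqemu_lock_user_page(&page_index, (unsigned long)buf);
 *     if (p != NULL) {
 *         ... access physical page 'page_index' ...
 *         kqemu_unlock_user_page(p);
 *     }
 */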
/*
 * Allocate a new page. The page must be mapped in the kernel space.
 * Return the page (its physical index goes to '*ppage_index') or NULL
 * if error.
 */
struct kqemu_page *CDECL kqemu_alloc_zeroed_page(unsigned long *ppage_index)
{
    pmap_t pmap;
    vm_offset_t va;
    vm_paddr_t pa;

    va = kmem_alloc(kernel_map, PAGE_SIZE);
    if (va == 0) {
        kqemu_log("kqemu_alloc_zeroed_page: NULL\n");
        return NULL;
    }
    pmap = vm_map_pmap(kernel_map);
    pa = pmap_extract(pmap, va);
    /* kqemu_log("kqemu_alloc_zeroed_page: %08x\n", pa); */
    *ppage_index = pa >> PAGE_SHIFT;
    return (struct kqemu_page *)va;
}
void CDECL kqemu_free_page(struct kqemu_page *page)
{
    if (kqemu_debug > 0)
        kqemu_log("kqemu_free_page(%p)\n", page);
    kmem_free(kernel_map, (vm_offset_t)page, PAGE_SIZE);
}
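
/*
 * Usage sketch (illustrative): allocate a zeroed, kernel-mapped page,
 * hand its physical index to the monitor, then release it. The
 * kqemu_page_kaddr() accessor is defined just below.
 *
 *     unsigned long page_index;
 *     struct kqemu_page *pg;
 *
 *     pg = kqemu_alloc_zeroed_page(&page_index);
 *     if (pg != NULL) {
 *         void *kaddr = kqemu_page_kaddr(pg);
 *         ... use kaddr and page_index ...
 *         kqemu_free_page(pg);
 *     }
 */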
/* return the kernel address of the physical page 'page' */
void *CDECL kqemu_page_kaddr(struct kqemu_page *page)
{
    vm_offset_t va = (vm_offset_t)page;
    return (void *)va;
}
/* constraint: each page of the vmalloc'ed area must be in the first 4
   GB of physical memory */
void *CDECL kqemu_vmalloc(unsigned int size)
{
    void *ptr = malloc(size, M_KQEMU, M_WAITOK);

    if (kqemu_debug > 0)
        kqemu_log("kqemu_vmalloc(%d): %p\n", size, ptr);
    return ptr;
}

void CDECL kqemu_vfree(void *ptr)
{
    if (kqemu_debug > 0)
        kqemu_log("kqemu_vfree(%p)\n", ptr);
    free(ptr, M_KQEMU);
}
/* return the physical page index for a given virtual page */
unsigned long CDECL kqemu_vmalloc_to_phys(const void *vaddr)
{
    vm_paddr_t pa = vtophys(vaddr);

    if (pa == 0) {
        kqemu_log("kqemu_vmalloc_to_phys(%p)->error\n", vaddr);
        return -1;
    }
    if (kqemu_debug > 0)
        kqemu_log("kqemu_vmalloc_to_phys(%p)->%08x\n", vaddr, (unsigned int)pa);
    return pa >> PAGE_SHIFT;
}
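
/*
 * Usage sketch (illustrative): the wrappers pair like malloc()/free(),
 * and kqemu_vmalloc_to_phys() translates any page of the allocation.
 *
 *     void *buf = kqemu_vmalloc(16 * PAGE_SIZE);
 *     unsigned long pfn = kqemu_vmalloc_to_phys(buf);
 *     ...
 *     kqemu_vfree(buf);
 */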
/* Map an IO area in the kernel address space and return its
   address. Return NULL if error or not implemented. */
void *CDECL kqemu_io_map(unsigned long page_index, unsigned int size)
{
    return NULL;                        /* not implemented on FreeBSD */
}

/* Unmap the IO area */
void CDECL kqemu_io_unmap(void *ptr, unsigned int size)
{
}
#if __FreeBSD_version < 500000
/* priority comparison helper, modelled on the 4.x scheduler's
   curpriority_cmp() */
static int
curpriority_cmp(struct proc *p)
{
    int c_class, p_class;

    c_class = RTP_PRIO_BASE(curproc->p_rtprio.type);
    p_class = RTP_PRIO_BASE(p->p_rtprio.type);
    if (p_class != c_class)
        return (p_class - c_class);
    if (p_class == RTP_PRIO_NORMAL)
        return (((int)p->p_priority - (int)curpriority) / PPQ);
    return ((int)p->p_rtprio.prio - (int)curproc->p_rtprio.prio);
}
/* return TRUE if a signal is pending (i.e. the guest must stop
   execution) */
int CDECL kqemu_schedule(void)
{
    struct proc *p = curproc;

    if (curpriority_cmp(p) > 0) {
        /* higher priority work is runnable: yield the CPU */
        p->p_priority = MAXPRI;
        mi_switch();
        p->p_stats->p_ru.ru_nvcsw++;
    }
    return issignal(curproc) != 0;
}
#else
/* return TRUE if a signal is pending (i.e. the guest must stop
   execution) */
int CDECL kqemu_schedule(void)
{
    /* kqemu_log("kqemu_schedule\n"); */
    mtx_lock_spin(&sched_lock);
    mi_switch(SW_VOL, NULL);
    mtx_unlock_spin(&sched_lock);
    return SIGPENDING(curthread);
}
#endif
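
/*
 * Both variants implement the same contract for the portable kqemu
 * core: voluntarily yield the CPU (a context switch accounted as
 * ru_nvcsw in the pre-5.x path), then report whether a signal is
 * pending so the caller can return to user space for delivery.
 */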
static char log_buf[4096];

void CDECL kqemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(log_buf, sizeof(log_buf), fmt, ap);
    printf("kqemu: %s", log_buf);
    va_end(ap);
}
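
/*
 * kqemu_log() is the printf-style logging hook used by the portable
 * kqemu core; messages reach the kernel message buffer with a
 * "kqemu: " prefix. Note that log_buf is a single static buffer, so
 * concurrent callers may interleave their output.
 *
 *     kqemu_log("ram_base=%p ram_size=%ld\n", base, size);
 */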
struct kqemu_instance {
#if __FreeBSD_version >= 500000
    TAILQ_ENTRY(kqemu_instance) kqemu_ent;
    struct cdev *kqemu_dev;
#endif
    /* struct semaphore sem; */
    struct kqemu_state *state;
};

static int kqemu_ref_count = 0;
static struct kqemu_global_state *kqemu_gs = NULL;
#if __FreeBSD_version < 500000
static dev_t kqemu_dev;
#else
static struct clonedevs *kqemuclones;
static TAILQ_HEAD(,kqemu_instance) kqemuhead = TAILQ_HEAD_INITIALIZER(kqemuhead);
static eventhandler_tag clonetag;
#endif
static d_close_t kqemu_close;
static d_open_t kqemu_open;
static d_ioctl_t kqemu_ioctl;
static struct cdevsw kqemu_cdevsw = {
#if __FreeBSD_version < 500000
    /* open */      kqemu_open,
    /* close */     kqemu_close,
    /* read */      noread,
    /* write */     nowrite,
    /* ioctl */     kqemu_ioctl,
    /* poll */      nopoll,
    /* mmap */      nommap,
    /* strategy */  nostrategy,
    /* name */      "kqemu",
    /* maj */       KQEMU_MAJOR,
    /* dump */      nodump,
    /* psize */     nopsize,
    /* flags */     0,
#else
    .d_version =    D_VERSION,
    .d_flags =      D_NEEDGIANT,
    .d_open =       kqemu_open,
    .d_ioctl =      kqemu_ioctl,
    .d_close =      kqemu_close,
    .d_name =       "kqemu",
#endif
};
#if __FreeBSD_version >= 500000
static void
#if __FreeBSD_version >= 600034
kqemu_clone(void *arg, struct ucred *cred, char *name, int namelen,
            struct cdev **dev)
#else
kqemu_clone(void *arg, char *name, int namelen, struct cdev **dev)
#endif
{
    int unit, r;

    if (*dev != NULL)
        return;
    if (strcmp(name, "kqemu") == 0)
        unit = -1;
    else if (dev_stdclone(name, NULL, "kqemu", &unit) != 1)
        return;                         /* bad name */
    r = clone_create(&kqemuclones, &kqemu_cdevsw, &unit, dev, 0);
    if (r) {
        *dev = make_dev(&kqemu_cdevsw, unit2minor(unit),
                        UID_ROOT, GID_WHEEL, 0660, "kqemu%d", unit);
        if (*dev != NULL)
            (*dev)->si_flags |= SI_CHEAPCLONE;
    }
}
#endif
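
/*
 * The dev_clone eventhandler manufactures a per-unit character device
 * (/dev/kqemu, /dev/kqemu0, ...) on demand, so each QEMU process gets
 * its own instance. SI_CHEAPCLONE marks the node as cheap to recreate,
 * letting the system reclaim it when it is no longer referenced.
 */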
static void kqemu_destroy(struct kqemu_instance *ks)
{
#if __FreeBSD_version >= 500000
    struct cdev *dev = ks->kqemu_dev;
#endif

    if (ks->state) {
        kqemu_delete(ks->state);
        ks->state = NULL;
    }
#if __FreeBSD_version >= 500000
    dev->si_drv1 = NULL;
    TAILQ_REMOVE(&kqemuhead, ks, kqemu_ent);
    destroy_dev(dev);
#endif
    free(ks, M_KQEMU);
    kqemu_ref_count--;
}
static int
#if __FreeBSD_version < 500000
kqemu_open(dev_t dev, int flags, int fmt __unused, struct proc *p)
{
#else
kqemu_open(struct cdev *dev, int flags, int fmt __unused,
           struct thread *td)
{
    struct proc *p = td->td_proc;
#endif
    struct kqemu_instance *ks;

#if __FreeBSD_version < 500000
    if (dev->si_drv1 != NULL)
        return EBUSY;                   /* single instance on 4.x */
#endif
    if ((flags & (FREAD|FWRITE)) == FREAD)
        return EPERM;

    ks = malloc(sizeof(struct kqemu_instance), M_KQEMU, M_WAITOK);
    if (ks == NULL) {
        kqemu_log("malloc failed\n");
        return ENOMEM;
    }
    memset(ks, 0, sizeof *ks);
#if __FreeBSD_version >= 500000
    ks->kqemu_dev = dev;
    TAILQ_INSERT_TAIL(&kqemuhead, ks, kqemu_ent);
#endif
    kqemu_ref_count++;
    dev->si_drv1 = ks;
    if (kqemu_debug > 0)
        kqemu_log("opened by pid=%d\n", p->p_pid);
    return 0;
}
static int
#if __FreeBSD_version < 500000
kqemu_ioctl(dev_t dev, u_long cmd, caddr_t addr,
            int flags __unused, struct proc *p)
#else
kqemu_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
            int flags __unused, struct thread *td)
#endif
{
    int error = 0;
    int ret;
    struct kqemu_instance *ks = dev->si_drv1;
    struct kqemu_state *s = ks->state;

    switch (cmd) {
    case KQEMU_INIT:
        {
            struct kqemu_init d1, *d = &d1;

            if (s != NULL) {
                error = EIO;
                break;
            }
            d1 = *(struct kqemu_init *)addr;
            if (kqemu_debug > 0)
                kqemu_log("ram_base=%p ram_size=%ld\n", d1.ram_base, d1.ram_size);
            s = kqemu_init(d, kqemu_gs);
            if (s == NULL) {
                error = ENOMEM;
                break;
            }
            ks->state = s;
            break;
        }
    case KQEMU_EXEC:
        {
            struct kqemu_cpu_state *ctx;

            if (s == NULL) {
                error = EIO;
                break;
            }
            ctx = kqemu_get_cpu_state(s);
            *ctx = *(struct kqemu_cpu_state *)addr;
#if __FreeBSD_version >= 500000
            DROP_GIANT();
#endif
            ret = kqemu_exec(s);
#if __FreeBSD_version >= 500000
            PICKUP_GIANT();
            td->td_retval[0] = ret;
#else
            p->p_retval[0] = ret;
#endif
            *(struct kqemu_cpu_state *)addr = *ctx;
            break;
        }
    case KQEMU_GET_VERSION:
        *(int *)addr = KQEMU_VERSION;
        break;
    default:
        error = EINVAL;
        break;
    }
    return error;
}
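
/*
 * User-space usage sketch (illustrative): QEMU drives this interface
 * with the ioctls declared in the kqemu headers.
 *
 *     int fd = open("/dev/kqemu", O_RDWR);
 *     int version;
 *
 *     ioctl(fd, KQEMU_GET_VERSION, &version);
 *     ... fill a struct kqemu_init and issue KQEMU_INIT once,
 *     then issue KQEMU_EXEC with a struct kqemu_cpu_state per run ...
 */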
static int
#if __FreeBSD_version < 500000
kqemu_close(dev_t dev, int flags, int fmt __unused, struct proc *p)
{
#else
kqemu_close(struct cdev *dev, int flags, int fmt __unused,
            struct thread *td)
{
    struct proc *p = td->td_proc;
#endif
    struct kqemu_instance *ks = (struct kqemu_instance *)dev->si_drv1;

    kqemu_destroy(ks);
    if (kqemu_debug > 0)
        kqemu_log("closed by pid=%d\n", p->p_pid);
    return 0;
}
static int
kqemu_modevent(module_t mod __unused, int type, void *data __unused)
{
    int error = 0;
    int max_locked_pages;
#if __FreeBSD_version < 500000
    int rc;
#else
    struct kqemu_instance *ks;
#endif

    switch (type) {
    case MOD_LOAD:
        printf("kqemu version 0x%08x\n", KQEMU_VERSION);
        max_locked_pages = physmem / 2;
        kqemu_gs = kqemu_global_init(max_locked_pages);
#if __FreeBSD_version < 500000
        if ((rc = cdevsw_add(&kqemu_cdevsw))) {
            kqemu_log("error registering cdevsw, rc=%d\n", rc);
            error = ENOENT;
            break;
        }
        kqemu_dev = make_dev(&kqemu_cdevsw, 0,
                             UID_ROOT, GID_WHEEL, 0660, "kqemu");
#else
        clone_setup(&kqemuclones);
        clonetag = EVENTHANDLER_REGISTER(dev_clone, kqemu_clone, 0, 1000);
#endif
        kqemu_log("KQEMU installed, max_locked_mem=%dkB.\n",
                  max_locked_pages * 4);
        break;
    case MOD_UNLOAD:
        if (kqemu_ref_count > 0) {
            error = EBUSY;
            break;
        }
#if __FreeBSD_version < 500000
        destroy_dev(kqemu_dev);
        if ((rc = cdevsw_remove(&kqemu_cdevsw)))
            kqemu_log("error unregistering, rc=%d\n", rc);
#else
        EVENTHANDLER_DEREGISTER(dev_clone, clonetag);
        while ((ks = TAILQ_FIRST(&kqemuhead)) != NULL)
            kqemu_destroy(ks);
        clone_cleanup(&kqemuclones);
#endif
        kqemu_global_delete(kqemu_gs);
        kqemu_gs = NULL;
        break;
    default:
        error = EOPNOTSUPP;
        break;
    }
    return error;
}
DEV_MODULE(kqemu, kqemu_modevent, NULL);
MODULE_VERSION(kqemu, 1);
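
/*
 * Build/load sketch (illustrative): as a standard kld, the module is
 * loaded with kldload and exposes the cloned /dev/kqemu nodes.
 *
 *     # kldload ./kqemu.ko
 *     # sysctl debug.kqemu_debug=1
 *     # kldunload kqemu
 */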