/*P:200 This contains all the /dev/lguest code, whereby the userspace
 * launcher controls and communicates with the Guest. For example,
 * the first write will tell us the Guest's memory layout and entry
 * point. A read will run the Guest until something happens, such as
 * a signal or the Guest doing a NOTIFY out to the Launcher. There is
 * also a way for the Launcher to attach eventfds to particular NOTIFY
 * values instead of returning from the read() call.
 */
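/*
 * To set the scene, a Launcher session as seen from userspace boils down to
 * "one write to set things up, then read in a loop". A very rough sketch
 * (illustrative only; the LHREQ_* request codes and argument layout come from
 * the launcher header, and error handling is omitted):
 *
 *	int fd = open("/dev/lguest", O_RDWR);
 *	unsigned long init[] = { LHREQ_INITIALIZE, base, pfnlimit, start };
 *
 *	write(fd, init, sizeof(init));		// tell us the memory layout
 *	for (;;) {
 *		unsigned long notify;
 *		read(fd, &notify, sizeof(notify));	// run the Guest
 *		// ...service the NOTIFY, send irqs, and read again...
 *	}
 */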
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "lg.h"
/*
 * Before we move on, let's jump ahead and look at what the kernel does when
 * it needs to look up the eventfds. That will complete our picture of how we
 * use RCU.
 *
 * The notification value is in cpu->pending_notify: we return true if it went
 * to an eventfd.
 */
bool send_notify_to_eventfd(struct lg_cpu *cpu)
{
	unsigned int i;
	struct lg_eventfd_map *map;

	/*
	 * This "rcu_read_lock()" helps track when someone is still looking at
	 * the (RCU-using) eventfds array. It's not actually a lock at all;
	 * indeed it's a noop in many configurations. (You didn't expect me to
	 * explain all the RCU secrets here, did you?)
	 */
	rcu_read_lock();
	/*
	 * rcu_dereference is the counter-side of rcu_assign_pointer(); it
	 * makes sure we don't access the memory pointed to by
	 * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy,
	 * but Alpha allows this! Paul McKenney points out that a really
	 * aggressive compiler could have the same effect:
	 *   http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
	 *
	 * So play safe, use rcu_dereference to get the rcu-protected pointer:
	 */
	map = rcu_dereference(cpu->lg->eventfds);
	/*
	 * Simple array search: even if they add an eventfd while we do this,
	 * we'll continue to use the old array and just won't see the new one.
	 */
	for (i = 0; i < map->num; i++) {
		if (map->map[i].addr == cpu->pending_notify) {
			eventfd_signal(map->map[i].event, 1);
			cpu->pending_notify = 0;
			break;
		}
	}
	/* We're done with the rcu-protected variable cpu->lg->eventfds. */
	rcu_read_unlock();

	/* If we cleared the notification, it's because we found a match. */
	return cpu->pending_notify == 0;
}
/*
 * One of the more tricksy tricks in the Linux Kernel is a technique called
 * Read Copy Update. Since one point of lguest is to teach lguest journeyers
 * about kernel coding, I use it here. (In case you're curious, other purposes
 * include learning about virtualization and instilling a deep appreciation for
 * simplicity and puppies).
 *
 * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
 * add new eventfds without ever blocking readers from accessing the array.
 * The current Launcher only does this during boot, so that never happens. But
 * Read Copy Update is cool, and adding a lock risks damaging even more puppies
 * than this code does.
 *
 * We allocate a brand new one-larger array, copy the old one and add our new
 * element. Then we make the lg eventfd pointer point to the new array.
 * That's the easy part: now we need to free the old one, but we need to make
 * sure no slow CPU somewhere is still looking at it. That's what
 * synchronize_rcu does for us: it waits until every CPU has indicated that it
 * knows it's no longer using the old one.
 *
 * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
 */
static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
{
	struct lg_eventfd_map *new, *old = lg->eventfds;

	/*
	 * We don't allow notifications on value 0 anyway (pending_notify of
	 * 0 means "nothing pending").
	 */
	if (!addr)
		return -EINVAL;

	/*
	 * Replace the old array with the new one, carefully: others can
	 * be accessing it at the same time.
	 */
	new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* First make identical copy. */
	memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
	new->num = old->num;

	/* Now append new entry. */
	new->map[new->num].addr = addr;
	new->map[new->num].event = eventfd_ctx_fdget(fd);
	if (IS_ERR(new->map[new->num].event)) {
		int err = PTR_ERR(new->map[new->num].event);
		kfree(new);
		return err;
	}
	new->num++;

	/*
	 * Now put new one in place: rcu_assign_pointer() is a fancy way of
	 * doing "lg->eventfds = new", but it uses memory barriers to make
	 * absolutely sure that the contents of "new" written above is nailed
	 * down before we actually do the assignment.
	 *
	 * We have to think about these kinds of things when we're operating on
	 * live data without locks.
	 */
	rcu_assign_pointer(lg->eventfds, new);

	/*
	 * We're not in a big hurry. Wait until no one's looking at the old
	 * version, then free it.
	 */
	synchronize_rcu();
	kfree(old);

	return 0;
}
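/*
 * Stripped of the lguest details, the pattern used by add_eventfd() above and
 * send_notify_to_eventfd() earlier is the classic RCU publish/read pairing.
 * A minimal sketch, with made-up names and allocation-failure handling
 * omitted, purely for illustration:
 *
 *	struct config { int value; };
 *	static struct config *cfg;		// RCU-protected pointer
 *
 *	static int reader(void)
 *	{
 *		int val;
 *
 *		rcu_read_lock();
 *		val = rcu_dereference(cfg)->value;
 *		rcu_read_unlock();
 *		return val;
 *	}
 *
 *	static void updater(int value)
 *	{
 *		struct config *new, *old = cfg;
 *
 *		new = kmalloc(sizeof(*new), GFP_KERNEL);
 *		new->value = value;
 *		rcu_assign_pointer(cfg, new);	// publish the new version
 *		synchronize_rcu();		// wait out readers of the old
 *		kfree(old);
 *	}
 */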
/*
 * Receiving notifications from the Guest is usually done by attaching a
 * particular LHCALL_NOTIFY value to an event file descriptor. The eventfd
 * will become readable when the Guest does an LHCALL_NOTIFY with that value.
 *
 * This is really convenient for processing each virtqueue in a separate
 * thread.
 */
static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
{
	unsigned long addr, fd;
	int err;

	if (get_user(addr, input) != 0)
		return -EFAULT;
	input++;
	if (get_user(fd, input) != 0)
		return -EFAULT;

	/*
	 * Just make sure two callers don't add eventfds at once. We really
	 * only need to lock against callers adding to the same Guest, so using
	 * the Big Lguest Lock is overkill. But this is setup, not a fast path.
	 */
	mutex_lock(&lguest_lock);
	err = add_eventfd(lg, addr, fd);
	mutex_unlock(&lguest_lock);

	return err;
}
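/*
 * From the Launcher's point of view, attaching an eventfd is just another
 * write to /dev/lguest. A rough userspace sketch (illustrative only, assuming
 * the LHREQ_EVENTFD request code from the launcher header; lguest_fd and
 * notify_addr are whatever the Launcher already has):
 *
 *	int efd = eventfd(0, 0);
 *	unsigned long buf[] = { LHREQ_EVENTFD, notify_addr, efd };
 *
 *	write(lguest_fd, buf, sizeof(buf));
 *
 *	// Later, a worker thread can block until the Guest notifies:
 *	uint64_t count;
 *	read(efd, &count, sizeof(count));
 */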
/*
 * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
 * number to /dev/lguest.
 */
static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
{
	unsigned long irq;

	if (get_user(irq, input) != 0)
		return -EFAULT;
	if (irq >= LGUEST_IRQS)
		return -EINVAL;

	/*
	 * Next time the Guest runs, the core code will see if it can deliver
	 * this interrupt.
	 */
	set_interrupt(cpu, irq);
	return 0;
}
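/*
 * So, from userspace, injecting an interrupt is a two-word write. A tiny
 * illustrative sketch (the irq number is whatever the Launcher assigned to
 * the device in question):
 *
 *	unsigned long buf[] = { LHREQ_IRQ, irq };
 *
 *	write(lguest_fd, buf, sizeof(buf));
 */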
/*
 * Once our Guest is initialized, the Launcher makes it run by reading
 * from /dev/lguest.
 */
static ssize_t read(struct file *file, char __user *user, size_t size,
		    loff_t *o)
{
	struct lguest *lg = file->private_data;
	struct lg_cpu *cpu;
	unsigned int cpu_id = *o;

	/* You must write LHREQ_INITIALIZE first! */
	if (!lg)
		return -EINVAL;

	/* Watch out for arbitrary vcpu indexes! */
	if (cpu_id >= lg->nr_cpus)
		return -EINVAL;

	cpu = &lg->cpus[cpu_id];

	/* If you're not the task which owns the Guest, go away. */
	if (current != cpu->tsk)
		return -EPERM;

	/* If the Guest is already dead, we indicate why */
	if (lg->dead) {
		size_t len;

		/* lg->dead either contains an error code, or a string. */
		if (IS_ERR(lg->dead))
			return PTR_ERR(lg->dead);

		/* We can only return as much as the buffer they read with. */
		len = min(size, strlen(lg->dead)+1);
		if (copy_to_user(user, lg->dead, len) != 0)
			return -EFAULT;
		return len;
	}

	/*
	 * If we returned from read() last time because the Guest sent I/O,
	 * clear the flag.
	 */
	if (cpu->pending_notify)
		cpu->pending_notify = 0;

	/* Run the Guest until something interesting happens. */
	return run_guest(cpu, (unsigned long __user *)user);
}
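/*
 * In other words, the Launcher's main loop is a read() loop. Roughly (an
 * illustrative sketch only: handle_notify() is a made-up stand-in for the
 * Launcher's device handling, and the exact return-value convention is
 * glossed over):
 *
 *	for (;;) {
 *		unsigned long notify_addr;
 *		ssize_t r = read(lguest_fd, &notify_addr, sizeof(notify_addr));
 *
 *		if (r == sizeof(notify_addr))
 *			handle_notify(notify_addr);	// Guest did LHCALL_NOTIFY
 *		else if (r < 0 && errno == EINTR)
 *			continue;			// e.g. a signal woke us
 *		else
 *			break;				// Guest died: read() tells us why
 *	}
 */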
/*
 * This actually initializes a CPU. For the moment, a Guest is only
 * uniprocessor, so "id" is always 0.
 */
static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
{
	/* We have a limited number of CPUs in the lguest struct. */
	if (id >= ARRAY_SIZE(cpu->lg->cpus))
		return -EINVAL;

	/* Set up this CPU's id, and pointer back to the lguest struct. */
	cpu->id = id;
	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
	cpu->lg->nr_cpus++;

	/* Each CPU has a timer it can set. */
	init_clockdev(cpu);

	/*
	 * We need a complete page for the Guest registers: they are accessible
	 * to the Guest and we can only grant it access to whole pages.
	 */
	cpu->regs_page = get_zeroed_page(GFP_KERNEL);
	if (!cpu->regs_page)
		return -ENOMEM;

	/* We actually put the registers at the bottom of the page. */
	cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);

	/*
	 * Now we initialize the Guest's registers, handing it the start
	 * address.
	 */
	lguest_arch_setup_regs(cpu, start_ip);

	/*
	 * We keep a pointer to the Launcher task (ie. current task) for when
	 * other Guests want to wake this one (eg. console input).
	 */
	cpu->tsk = current;

	/*
	 * We need to keep a pointer to the Launcher's memory map, because if
	 * the Launcher dies we need to clean it up. If we don't keep a
	 * reference, it is destroyed before close() is called.
	 */
	cpu->mm = get_task_mm(cpu->tsk);

	/*
	 * We remember which CPU's pages this Guest used last, for optimization
	 * when the same Guest runs on the same CPU twice.
	 */
	cpu->last_pages = NULL;

	/* No error == success. */
	return 0;
}
/*
 * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in
 * addition to the LHREQ_INITIALIZE value). These are:
 *
 * base: The start of the Guest-physical memory inside the Launcher memory.
 *
 * pfnlimit: The highest (Guest-physical) page number the Guest should be
 * allowed to access. The Guest memory lives inside the Launcher, so it sets
 * this to ensure the Guest can only reach its own memory.
 *
 * start: The first instruction to execute ("eip" in x86-speak).
 */
static int initialize(struct file *file, const unsigned long __user *input)
{
	/* "struct lguest" contains all we (the Host) know about a Guest. */
	struct lguest *lg;
	int err;
	unsigned long args[3];

	/*
	 * We grab the Big Lguest lock, which protects against multiple
	 * simultaneous initializations.
	 */
	mutex_lock(&lguest_lock);
	/* You can't initialize twice! Close the device and start again... */
	if (file->private_data) {
		err = -EBUSY;
		goto unlock;
	}

	if (copy_from_user(args, input, sizeof(args)) != 0) {
		err = -EFAULT;
		goto unlock;
	}

	lg = kzalloc(sizeof(*lg), GFP_KERNEL);
	if (!lg) {
		err = -ENOMEM;
		goto unlock;
	}

	lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
	if (!lg->eventfds) {
		err = -ENOMEM;
		goto free_lg;
	}
	lg->eventfds->num = 0;

	/* Populate the easy fields of our "struct lguest" */
	lg->mem_base = (void __user *)args[0];
	lg->pfn_limit = args[1];

	/* This is the first cpu (cpu 0) and it will start booting at args[2] */
	err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
	if (err)
		goto free_eventfds;

	/*
	 * Initialize the Guest's shadow page tables. This allocates
	 * memory, so can fail.
	 */
	err = init_guest_pagetable(lg);
	if (err)
		goto free_regs;

	/* We keep our "struct lguest" in the file's private_data. */
	file->private_data = lg;

	mutex_unlock(&lguest_lock);

	/* And because this is a write() call, we return the length used. */
	return sizeof(args);

free_regs:
	/* FIXME: This should be in free_vcpu */
	free_page(lg->cpus[0].regs_page);
free_eventfds:
	kfree(lg->eventfds);
free_lg:
	kfree(lg);
unlock:
	mutex_unlock(&lguest_lock);
	return err;
}
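/*
 * For illustration, the Launcher's side of this very first write might look
 * something like the sketch below. This is not canonical code: guest_base is
 * assumed to be the mmap()ed region holding Guest memory, guest_pages its
 * size in pages, and entry the address to start executing.
 *
 *	unsigned long init[] = {
 *		LHREQ_INITIALIZE,
 *		(unsigned long)guest_base,	// base
 *		guest_pages,			// pfnlimit
 *		entry				// start ("eip")
 *	};
 *
 *	if (write(lguest_fd, init, sizeof(init)) < 0)
 *		err(1, "writing to /dev/lguest");
 */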
/*
 * The first operation the Launcher does must be a write. All writes
 * start with an unsigned long number: for the first write this must be
 * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
 * writes of other values to send interrupts or set up receipt of
 * notifications.
 *
 * Note that we overload the "offset" in the /dev/lguest file to indicate what
 * CPU number we're dealing with. Currently this is always 0 since we only
 * support uniprocessor Guests, but you can see the beginnings of SMP support
 * here.
 */
static ssize_t write(struct file *file, const char __user *in,
		     size_t size, loff_t *off)
{
	/*
	 * Once the Guest is initialized, we hold the "struct lguest" in the
	 * file private data.
	 */
	struct lguest *lg = file->private_data;
	const unsigned long __user *input = (const unsigned long __user *)in;
	unsigned long req;
	struct lg_cpu *uninitialized_var(cpu);
	unsigned int cpu_id = *off;

	/* The first value tells us what this request is. */
	if (get_user(req, input) != 0)
		return -EFAULT;
	input++;

	/* If you haven't initialized, you must do that first. */
	if (req != LHREQ_INITIALIZE) {
		if (!lg || (cpu_id >= lg->nr_cpus))
			return -EINVAL;
		cpu = &lg->cpus[cpu_id];

		/* Once the Guest is dead, you can only read() why it died. */
		if (lg->dead)
			return -ENOENT;
	}

	switch (req) {
	case LHREQ_INITIALIZE:
		return initialize(file, input);
	case LHREQ_IRQ:
		return user_send_irq(cpu, input);
	case LHREQ_EVENTFD:
		return attach_eventfd(lg, input);
	default:
		return -EINVAL;
	}
}
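/*
 * That "offset means CPU id" trick implies that a hypothetical SMP Launcher
 * would pick a vcpu with the file offset before each request. Purely a
 * sketch, not something today's Launcher does:
 *
 *	unsigned long buf[] = { LHREQ_IRQ, irq };
 *
 *	lseek(lguest_fd, cpu_id, SEEK_SET);	// which vcpu to target
 *	write(lguest_fd, buf, sizeof(buf));
 *
 * With uniprocessor Guests the offset simply stays at 0.
 */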
/*
 * The final piece of interface code is the close() routine. It reverses
 * everything done in initialize(). This is usually called because the
 * Launcher exited.
 *
 * Note that the close routine returns 0 or a negative error number: it can't
 * really fail, but it can whine. I blame Sun for this wart, and K&R C for
 * letting them do it.
 */
static int close(struct inode *inode, struct file *file)
{
	struct lguest *lg = file->private_data;
	unsigned int i;

	/* If we never successfully initialized, there's nothing to clean up */
	if (!lg)
		return 0;

	/*
	 * We need the big lock, to protect from inter-guest I/O and other
	 * Launchers initializing guests.
	 */
	mutex_lock(&lguest_lock);

	/* Free up the shadow page tables for the Guest. */
	free_guest_pagetable(lg);

	for (i = 0; i < lg->nr_cpus; i++) {
		/* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
		hrtimer_cancel(&lg->cpus[i].hrt);
		/* We can free up the register page we allocated. */
		free_page(lg->cpus[i].regs_page);
		/*
		 * Now all the memory cleanups are done, it's safe to release
		 * the Launcher's memory management structure.
		 */
		mmput(lg->cpus[i].mm);
	}

	/* Release any eventfds they registered. */
	for (i = 0; i < lg->eventfds->num; i++)
		eventfd_ctx_put(lg->eventfds->map[i].event);
	kfree(lg->eventfds);

	/*
	 * If lg->dead doesn't contain an error code it will be NULL or a
	 * kmalloc()ed string, either of which is ok to hand to kfree().
	 */
	if (!IS_ERR(lg->dead))
		kfree(lg->dead);
	/* Free the memory allocated to the lguest_struct */
	kfree(lg);
	/* Release lock and exit. */
	mutex_unlock(&lguest_lock);

	return 0;
}
/*
 * Welcome to our journey through the Launcher!
 *
 * The Launcher is the Host userspace program which sets up, runs and services
 * the Guest. In fact, many comments in the Drivers which refer to "the Host"
 * doing things are inaccurate: the Launcher does all the device handling for
 * the Guest, but the Guest can't know that.
 *
 * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we
 * shall see more of that later.
 *
 * We begin our understanding with the Host kernel interface which the Launcher
 * uses: reading and writing a character device called /dev/lguest. All the
 * work happens in the read(), write() and close() routines:
 */
static const struct file_operations lguest_fops = {
	.owner	 = THIS_MODULE,
	.release = close,
	.write	 = write,
	.read	 = read,
	.llseek  = default_llseek,
};
/*
 * This is a textbook example of a "misc" character device. Populate a "struct
 * miscdevice" and register it with misc_register().
 */
static struct miscdevice lguest_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "lguest",
	.fops	= &lguest_fops,
};
int __init lguest_device_init(void)
{
	return misc_register(&lguest_dev);
}

void __exit lguest_device_remove(void)
{
	misc_deregister(&lguest_dev);
}