/*P:010
 * A hypervisor allows multiple Operating Systems to run on a single machine.
 * To quote David Wheeler: "Any problem in computer science can be solved with
 * another layer of indirection."
 *
 * We keep things simple in two ways.  First, we start with a normal Linux
 * kernel and insert a module (lg.ko) which allows us to run other Linux
 * kernels the same way we'd run processes.  We call the first kernel the Host,
 * and the others the Guests.  The program which sets up and configures Guests
 * (such as the example in Documentation/lguest/lguest.c) is called the
 * Launcher.
 *
 * Secondly, we only run specially modified Guests, not normal kernels.  When
 * you set CONFIG_LGUEST to 'y' or 'm', this automatically sets
 * CONFIG_LGUEST_GUEST=y, which compiles this file into the kernel so it knows
 * how to be a Guest.  This means that you can use the same kernel you boot
 * normally (ie. as a Host) as a Guest.
 *
 * These Guests know that they cannot do privileged operations, such as disable
 * interrupts, and that they have to ask the Host to do such things explicitly.
 * This file consists of all the replacements for such low-level native
 * hardware operations: these special Guest versions call the Host.
 *
 * So how does the kernel know it's a Guest?  The Guest starts at a special
 * entry point marked with a magic string, which sets up a few things then
 * calls here.  We replace the native functions in the various "paravirt"
 * structures with our Guest versions, then boot like normal. :*/
/*
 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <linux/virtio_console.h>
#include <linux/pm.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/io.h>
#include <asm/i387.h>
#include <asm/reboot.h>		/* for struct machine_ops */
/*G:010 Welcome to the Guest!
 *
 * The Guest in our tale is a simple creature: identical to the Host but
 * behaving in simplified but equivalent ways.  In particular, the Guest is the
 * same kernel as the Host (or at least, built from the same source code). :*/

/* Declarations for definitions in lguest_guest.S */
extern char lguest_noirq_start[], lguest_noirq_end[];
extern const char lgstart_cli[], lgend_cli[];
extern const char lgstart_sti[], lgend_sti[];
extern const char lgstart_popf[], lgend_popf[];
extern const char lgstart_pushf[], lgend_pushf[];
extern const char lgstart_iret[], lgend_iret[];
extern void lguest_iret(void);
struct lguest_data lguest_data = {
	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
	.noirq_start = (u32)lguest_noirq_start,
	.noirq_end = (u32)lguest_noirq_end,
	.kernel_address = PAGE_OFFSET,
	.blocked_interrupts = { 1 }, /* Block timer interrupts */
	.syscall_vec = SYSCALL_VECTOR,
};
static cycle_t clock_base;
/*G:037 async_hcall() is pretty simple: I'm quite proud of it really.  We have
 * a ring buffer of stored hypercalls which the Host will run through next time
 * we do a normal hypercall.  Each entry in the ring has 4 slots for the
 * hypercall arguments, and a "hcall_status" word which is 0 if the call is
 * ready to go, and 255 once the Host has finished with it.
 *
 * If we come around to a slot which hasn't been finished, then the table is
 * full and we just make the hypercall directly.  This has the nice side
 * effect of causing the Host to run all the stored calls in the ring buffer
 * which empties it for next time! */
static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3)
{
	/* Note: This code assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/* Disable interrupts if not already disabled: we don't want an
	 * interrupt handler making a hypercall while we're already doing
	 * one! */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Table full, so do normal hcall which will flush table. */
		hcall(call, arg1, arg2, arg3);
	} else {
		lguest_data.hcalls[next_call].arg0 = call;
		lguest_data.hcalls[next_call].arg1 = arg1;
		lguest_data.hcalls[next_call].arg2 = arg2;
		lguest_data.hcalls[next_call].arg3 = arg3;
		/* Arguments must all be written before we mark it to go */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}
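
/* To make that handshake concrete, here is a sketch (not from this file, and
 * the helper name is hypothetical) of how a host-side consumer could drain
 * the ring under the 0-means-ready / 0xFF-means-done protocol above. */
#if 0
static void run_stored_hcalls(struct lguest_data *d)
{
	unsigned int i;

	for (i = 0; i < LHCALL_RING_SIZE; i++) {
		if (d->hcall_status[i] != 0)	/* only 0 means "ready to go" */
			continue;
		/* do_one_hcall() is a stand-in for the Host's dispatcher. */
		do_one_hcall(d->hcalls[i].arg0, d->hcalls[i].arg1,
			     d->hcalls[i].arg2, d->hcalls[i].arg3);
		/* Mark the slot finished so the Guest can reuse it. */
		d->hcall_status[i] = 0xFF;
	}
}
#endif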
/*G:035 Notice the lazy_hcall() above, rather than hcall().  This is our first
 * real optimization trick!
 *
 * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
 * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
 * are reasonably expensive, batching them up makes sense.  For example, a
 * large munmap might update dozens of page table entries: that code calls
 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
 * lguest_leave_lazy_mode().
 *
 * So, when we're in lazy mode, we call async_hcall() to store the call for
 * future processing. */
static void lazy_hcall(unsigned long call,
		       unsigned long arg1,
		       unsigned long arg2,
		       unsigned long arg3)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3);
	else
		async_hcall(call, arg1, arg2, arg3);
}

/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
 * issue a hypercall to flush any stored calls. */
static void lguest_leave_lazy_mode(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
}
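
/* Purely illustrative (not part of the original file): the batching pattern
 * described above, as a page-table-updating caller would see it.  Assumes the
 * generic arch_{enter,leave}_lazy_mmu_mode() hooks, which paravirt routes to
 * the lazy-mode enter/leave functions registered in lguest_init() below. */
#if 0
static void remap_range_example(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t *ptes, unsigned int n)
{
	unsigned int i;

	arch_enter_lazy_mmu_mode();	/* hypercalls now queue via async_hcall() */
	for (i = 0; i < n; i++)
		set_pte_at(mm, addr + i*PAGE_SIZE, ptep + i, ptes[i]);
	arch_leave_lazy_mmu_mode();	/* one LHCALL_FLUSH_ASYNC runs the batch */
}
#endif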
/*G:033
 * After that diversion we return to our first native-instruction
 * replacements: four functions for interrupt control.
 *
 * The simplest way of implementing these would be to have "turn interrupts
 * off" and "turn interrupts on" hypercalls.  Unfortunately, this is too slow:
 * these are by far the most commonly called functions of those we override.
 *
 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
 * which the Guest can update with a single instruction.  The Host knows to
 * check there when it wants to deliver an interrupt.
 */

/* save_flags() is expected to return the processor state (ie. "flags").  The
 * flags word contains all kinds of stuff, but in practice Linux only cares
 * about the interrupt flag.  Our "save_flags()" just returns that. */
static unsigned long save_fl(void)
{
	return lguest_data.irq_enabled;
}

/* restore_flags() just sets the flags back to the value given. */
static void restore_fl(unsigned long flags)
{
	lguest_data.irq_enabled = flags;
}

/* Interrupts go off... */
static void irq_disable(void)
{
	lguest_data.irq_enabled = 0;
}

/* Interrupts go on... */
static void irq_enable(void)
{
	lguest_data.irq_enabled = X86_EFLAGS_IF;
}
/*:*/
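
/* To see the benefit (an illustrative aside, not original text), compare what
 * "interrupts off" boils down to natively and here:
 *
 *	native:	asm volatile("cli");		 (privileged: traps at ring 1)
 *	lguest:	lguest_data.irq_enabled = 0;	 (one ordinary memory write)
 *
 * The Guest version never has to leave ring 1, which is the whole trick. */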
/*M:003 Note that we don't check for outstanding interrupts when we re-enable
 * them (or when we unmask an interrupt).  This seems to work for the moment,
 * since interrupts are rare and we'll just get the interrupt on the next timer
 * tick, but when we turn on CONFIG_NO_HZ, we should revisit this.  One way
 * would be to put the "irq_enabled" field in a page by itself, and have the
 * Host write-protect it when an interrupt comes in when irqs are disabled.
 * There will then be a page fault as soon as interrupts are re-enabled. :*/
/*G:034
 * The Interrupt Descriptor Table (IDT).
 *
 * The IDT tells the processor what to do when an interrupt comes in.  Each
 * entry in the table is a 64-bit descriptor: this holds the privilege level,
 * address of the handler, and... well, who cares?  The Guest just asks the
 * Host to make the change anyway, because the Host controls the real IDT.
 */
static void lguest_write_idt_entry(gate_desc *dt,
				   int entrynum, const gate_desc *g)
{
	u32 *desc = (u32 *)g;
	/* Keep the local copy up to date. */
	native_write_idt_entry(dt, entrynum, g);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
}
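
/* For reference (a sketch; layout per the Intel manuals, so double-check
 * before relying on it): the 8-byte gate descriptor we just split into
 * desc[0] and desc[1] looks like this on 32-bit, which is why two u32
 * hypercall arguments are enough:
 *
 *	desc[0]: segment selector (bits 31:16) | handler address bits 15:0
 *	desc[1]: handler address bits 31:16 | present/DPL/type flags
 */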
/* Changing to a different IDT is very rare: we keep the IDT up-to-date every
 * time it is written, so we can simply loop through all entries and tell the
 * Host about them. */
static void lguest_load_idt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *idt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
}
/*
 * The Global Descriptor Table.
 *
 * The Intel architecture defines another table, called the Global Descriptor
 * Table (GDT).  You tell the CPU where it is (and its size) using the "lgdt"
 * instruction, and then several other instructions refer to entries in the
 * table.  There are three entries which the Switcher needs, so the Host simply
 * controls the entire thing and the Guest asks it to make changes using the
 * LOAD_GDT hypercall.
 *
 * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
 * hypercall and use that repeatedly to load a new IDT.  I don't think it
 * really matters, but wouldn't it be nice if they were the same?
 */
static void lguest_load_gdt(const struct desc_ptr *desc)
{
	BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
	hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
}

/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
 * then tell the Host to reload the entire thing.  This operation is so rare
 * that this naive implementation is reasonable. */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	native_write_gdt_entry(dt, entrynum, desc, type);
	hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
}

/* OK, I lied.  There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables).  So we have a hypercall specifically for this case. */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/* There's one problem which normal hardware doesn't have: the Host
	 * can't handle us removing entries we're currently using.  So we clear
	 * the GS register here: if it's needed it'll be reloaded anyway. */
	loadsegment(gs, 0);
	lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
}
/*G:038 That's enough excitement for now, back to ploughing through each of
 * the different pv_ops structures (we're about 1/3 of the way through).
 *
 * This is the Local Descriptor Table, another weird Intel thingy.  Linux only
 * uses this for some strange applications like Wine.  We don't do anything
 * here, so they'll get an informative and friendly Segmentation Fault. */
static void lguest_set_ldt(const void *addr, unsigned entries)
{
}

/* This loads a GDT entry into the "Task Register": that entry points to a
 * structure called the Task State Segment.  Some comments scattered through
 * the kernel code indicate that this was used for task switching in ages
 * past, along with blood sacrifice and astrology.
 *
 * Now there's nothing interesting in here that we don't get told elsewhere.
 * But the native version uses the "ltr" instruction, which makes the Host
 * complain to the Guest about a Segmentation Fault and it'll oops.  So we
 * override the native version with a do-nothing version. */
static void lguest_load_tr_desc(void)
{
}
308 /* The "cpuid" instruction is a way of querying both the CPU identity
309 * (manufacturer, model, etc) and its features. It was introduced before the
310 * Pentium in 1993 and keeps getting extended by both Intel and AMD. As you
311 * might imagine, after a decade and a half this treatment, it is now a giant
312 * ball of hair. Its entry in the current Intel manual runs to 28 pages.
314 * This instruction even it has its own Wikipedia entry. The Wikipedia entry
315 * has been translated into 4 languages. I am not making this up!
317 * We could get funky here and identify ourselves as "GenuineLguest", but
318 * instead we just use the real "cpuid" instruction. Then I pretty much turned
319 * off feature bits until the Guest booted. (Don't say that: you'll damage
320 * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
321 * hardly future proof.) Noone's listening! They don't like you anyway,
322 * parenthetic weirdo!
324 * Replacing the cpuid so we can turn features off is great for the kernel, but
325 * anyone (including userspace) can just use the raw "cpuid" instruction and
326 * the Host won't even notice since it isn't privileged. So we try not to get
327 * too worked up about it. */
328 static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
329 unsigned int *cx, unsigned int *dx)
331 int function = *ax;
333 native_cpuid(ax, bx, cx, dx);
334 switch (function) {
335 case 1: /* Basic feature request. */
336 /* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
337 *cx &= 0x00002201;
338 /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, FPU. */
339 *dx &= 0x07808101;
340 /* The Host can do a nice optimization if it knows that the
341 * kernel mappings (addresses above 0xC0000000 or whatever
342 * PAGE_OFFSET is set to) haven't changed. But Linux calls
343 * flush_tlb_user() for both user and kernel mappings unless
344 * the Page Global Enable (PGE) feature bit is set. */
345 *dx |= 0x00002000;
346 break;
347 case 0x80000000:
348 /* Futureproof this a little: if they ask how much extended
349 * processor information there is, limit it to known fields. */
350 if (*ax > 0x80000008)
351 *ax = 0x80000008;
352 break;
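
/* A bit-level decode of those masks, for reference (an added note, with bit
 * positions from the Intel SDM; illustrative only):
 *
 *	0x00002201 (ECX) = SSE3 (bit 0) | SSSE3 (bit 9) | CX16 (bit 13)
 *	0x07808101 (EDX) = FPU (0) | CX8 (8) | CMOV (15) | MMX (23) |
 *			   FXSR (24) | SSE (25) | SSE2 (26)
 *	0x00002000 (EDX) = PGE (bit 13), OR'd back in so Linux skips the
 *			   extra kernel-mapping flushes described above. */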
/* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
 * it.  The Host needs to know when the Guest wants to change them, so we have
 * a whole series of functions like read_cr0() and write_cr0().
 *
 * We start with cr0.  cr0 allows you to turn on and off all kinds of basic
 * features, but Linux only really cares about one: the horrifically-named Task
 * Switched (TS) bit at bit 3 (ie. 8)
 *
 * What does the TS bit do?  Well, it causes the CPU to trap (interrupt 7) if
 * the floating point unit is used.  Which allows us to restore FPU state
 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
 * name like "FPUTRAP bit" be a little less cryptic?
 *
 * We store cr0 (and cr3) locally, because the Host never changes it.  The
 * Guest sometimes wants to read it and we'd prefer not to bother the Host
 * unnecessarily. */
static unsigned long current_cr0, current_cr3;
static void lguest_write_cr0(unsigned long val)
{
	lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0);
	current_cr0 = val;
}

static unsigned long lguest_read_cr0(void)
{
	return current_cr0;
}

/* Intel provided a special instruction to clear the TS bit for people too cool
 * to use write_cr0() to do it.  This "clts" instruction is faster, because all
 * the vowels have been optimized out. */
static void lguest_clts(void)
{
	lazy_hcall(LHCALL_TS, 0, 0, 0);
	current_cr0 &= ~X86_CR0_TS;
}
/* cr2 is the virtual address of the last page fault, which the Guest only ever
 * reads.  The Host kindly writes this into our "struct lguest_data", so we
 * just read it out of there. */
static unsigned long lguest_read_cr2(void)
{
	return lguest_data.cr2;
}

/* cr3 is the current toplevel pagetable page: the principle is the same as
 * cr0.  Keep a local copy, and tell the Host when it changes. */
static void lguest_write_cr3(unsigned long cr3)
{
	lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
	current_cr3 = cr3;
}

static unsigned long lguest_read_cr3(void)
{
	return current_cr3;
}

/* cr4 is used to enable and disable PGE, but we don't care. */
static unsigned long lguest_read_cr4(void)
{
	return 0;
}

static void lguest_write_cr4(unsigned long val)
{
}
/*
 * Page Table Handling.
 *
 * Now would be a good time to take a rest and grab a coffee or similarly
 * relaxing stimulant.  The easy parts are behind us, and the trek gradually
 * winds uphill from here.
 *
 * Quick refresher: memory is divided into "pages" of 4096 bytes each.  The CPU
 * maps virtual addresses to physical addresses using "page tables".  We could
 * use one huge index of 1 million entries: each address is 4 bytes, so that's
 * 1024 pages just to hold the page tables.  But since most virtual addresses
 * are unused, we use a two level index which saves space.  The cr3 register
 * contains the physical address of the top level "page directory" page, which
 * contains physical addresses of up to 1024 second-level pages.  Each of these
 * second level pages contains up to 1024 physical addresses of actual pages,
 * or Page Table Entries (PTEs).
 *
 * Here's a diagram, where arrows indicate physical addresses:
 *
 * cr3 ---> +---------+
 *          |  --------->+---------+
 *          |         |  | PADDR1  |
 *        Top-level   |  | PADDR2  |
 *        (PMD) page  |  |         |
 *          |         |  Lower-level
 *          |         |  (PTE) page
 *          |         |  |         |
 *            ....         ....
 *
 * So to convert a virtual address to a physical address, we look up the top
 * level, which points us to the second level, which gives us the physical
 * address of that page.  If the top level entry was not present, or the second
 * level entry was not present, then the virtual address is invalid (we
 * say "the page was not mapped").
 *
 * Put another way, a 32-bit virtual address is divided up like so:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
 *    Index into top     Index into second    Offset within page
 *  page directory page    pagetable page
 *
 * The kernel spends a lot of time changing both the top-level page directory
 * and lower-level pagetable pages.  The Guest doesn't know physical addresses,
 * so while it maintains these page tables exactly like normal, it also needs
 * to keep the Host informed whenever it makes a change: the Host will create
 * the real page tables based on the Guests'.
 */
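
/* A worked example of that split (an added, purely illustrative helper; it
 * exists nowhere else).  The bit pattern in the diagram above is the address
 * 0xC0100000, which splits like so: */
#if 0
static void split_vaddr_example(void)
{
	unsigned long vaddr = 0xC0100000;
	unsigned int pgd_idx = vaddr >> 22;		/* top 10 bits: 0x300 */
	unsigned int pte_idx = (vaddr >> 12) & 0x3FF;	/* next 10 bits: 0x100 */
	unsigned int offset  = vaddr & 0xFFF;		/* low 12 bits:  0x000 */
}
#endif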
/* The Guest calls this to set a second-level entry (pte), ie. to map a page
 * into a process' address space.  We set the entry then tell the Host the
 * toplevel and address this corresponds to.  The Guest uses one pagetable per
 * process, so we need to tell the Host which one we're changing (mm->pgd). */
static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
}

/* The Guest calls this to set a top-level entry.  Again, we set the entry then
 * tell the Host which top-level page we changed, and the index of the entry we
 * changed. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
	lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
		   (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
}
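
/* Decoding those two arguments (a reference note, not original text):
 * __pa(pmdp) & PAGE_MASK is the physical address of the page-directory page,
 * and since each pmd entry is 4 bytes, (__pa(pmdp) & (PAGE_SIZE-1)) / 4
 * turns the entry's byte offset within that page into its index (0..1023). */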
/* There are a couple of legacy places where the kernel sets a PTE, but we
 * don't know the top level any more.  This is useless for us, since we don't
 * know which pagetable is changing or what address, so we just tell the Host
 * to forget all of them.  Fortunately, this is very rare.
 *
 * ... except in early boot when the kernel sets up the initial pagetables,
 * which makes booting astonishingly slow.  So we don't even tell the Host
 * anything changed until we've done the first page table switch. */
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	/* Don't bother with hypercall before initial setup. */
	if (current_cr3)
		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
}

/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations.  On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
 *
 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
 * called when a valid entry is written, not when it's removed (ie. marked not
 * present).  Instead, this is where we come when the Guest wants to remove a
 * page table entry: we tell the Host to set that entry to 0 (ie. the present
 * bit is zero). */
static void lguest_flush_tlb_single(unsigned long addr)
{
	/* Simply set it to zero: if the mapping was in use, it will fault
	 * back in. */
	lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
}

/* This is what happens after the Guest has removed a large number of entries.
 * This tells the Host that any of the page table entries for userspace might
 * have changed, ie. virtual addresses below PAGE_OFFSET. */
static void lguest_flush_tlb_user(void)
{
	lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
}

/* This is called when the kernel page tables have changed.  That's not very
 * common (unless the Guest is using highmem, which makes the Guest extremely
 * slow), so it's worth separating this from the user flushing above. */
static void lguest_flush_tlb_kernel(void)
{
	lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
}
/*
 * The Unadvanced Programmable Interrupt Controller.
 *
 * This is an attempt to implement the simplest possible interrupt controller.
 * I spent some time looking through routines like set_irq_chip_and_handler,
 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
 * I *think* this is as simple as it gets.
 *
 * We can tell the Host which interrupts we want blocked using the
 * lguest_data.blocked_interrupts bitmap, so disabling (aka "masking") them is
 * as simple as setting a bit.  We don't actually "ack" interrupts as such, we
 * just mask and unmask them.  I wonder if we should be cleverer?
 */
static void disable_lguest_irq(unsigned int irq)
{
	set_bit(irq, lguest_data.blocked_interrupts);
}

static void enable_lguest_irq(unsigned int irq)
{
	clear_bit(irq, lguest_data.blocked_interrupts);
}

/* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = {
	.name		= "lguest",
	.mask		= disable_lguest_irq,
	.mask_ack	= disable_lguest_irq,
	.unmask		= enable_lguest_irq,
};

/* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller. */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = 0; i < LGUEST_IRQS; i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;
		if (vector != SYSCALL_VECTOR) {
			set_intr_gate(vector, interrupt[i]);
			set_irq_chip_and_handler(i, &lguest_irq_controller,
						 handle_level_irq);
		}
	}
	/* This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts. */
	irq_ctx_init(smp_processor_id());
}
/*
 * Time.
 *
 * It would be far better for everyone if the Guest had its own clock, but
 * until then the Host gives us the time on every interrupt.
 */
static unsigned long lguest_get_wallclock(void)
{
	return lguest_data.time.tv_sec;
}

static cycle_t lguest_clock_read(void)
{
	unsigned long sec, nsec;

	/* If the Host tells us the TSC speed, we can trust the TSC. */
	if (lguest_data.tsc_khz)
		return native_read_tsc();

	/* If we can't use the TSC, we read the time value written by the Host.
	 * Since it's in two parts (seconds and nanoseconds), we risk reading
	 * it just as it's changing from 99 & 0.999999999 to 100 and 0, and
	 * getting 99 and 0.  As Linux tends to come apart under the stress of
	 * time travel, we must be careful: */
	do {
		/* First we read the seconds part. */
		sec = lguest_data.time.tv_sec;
		/* This read memory barrier tells the compiler and the CPU that
		 * this can't be reordered: we have to complete the above
		 * before going on. */
		rmb();
		/* Now we read the nanoseconds part. */
		nsec = lguest_data.time.tv_nsec;
		/* Make sure we've done that. */
		rmb();
		/* Now if the seconds part has changed, try again. */
	} while (unlikely(lguest_data.time.tv_sec != sec));

	/* Our non-TSC clock is in real nanoseconds. */
	return sec*1000000000ULL + nsec;
}
/* This is what we tell the kernel is our clocksource. */
static struct clocksource lguest_clock = {
	.name		= "lguest",
	.rating		= 400,
	.read		= lguest_clock_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1 << 22,
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
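
/* A quick sanity check of those mult/shift values (a reference note, not
 * original text): the clocksource core converts cycles to nanoseconds as
 *
 *	ns = (cycles * mult) >> shift;
 *
 * so mult = 1 << 22 with shift = 22 is the identity -- exactly right when
 * lguest_clock_read() already returns nanoseconds.  When the TSC is usable,
 * lguest_time_init() below swaps in a real ratio via clocksource_khz2mult(). */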
648 /* The "scheduler clock" is just our real clock, adjusted to start at zero */
649 static unsigned long long lguest_sched_clock(void)
651 return cyc2ns(&lguest_clock, lguest_clock_read() - clock_base);
654 /* We also need a "struct clock_event_device": Linux asks us to set it to go
655 * off some time in the future. Actually, James Morris figured all this out, I
656 * just applied the patch. */
657 static int lguest_clockevent_set_next_event(unsigned long delta,
658 struct clock_event_device *evt)
660 if (delta < LG_CLOCK_MIN_DELTA) {
661 if (printk_ratelimit())
662 printk(KERN_DEBUG "%s: small delta %lu ns\n",
663 __FUNCTION__, delta);
664 return -ETIME;
666 hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0);
667 return 0;
670 static void lguest_clockevent_set_mode(enum clock_event_mode mode,
671 struct clock_event_device *evt)
673 switch (mode) {
674 case CLOCK_EVT_MODE_UNUSED:
675 case CLOCK_EVT_MODE_SHUTDOWN:
676 /* A 0 argument shuts the clock down. */
677 hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0);
678 break;
679 case CLOCK_EVT_MODE_ONESHOT:
680 /* This is what we expect. */
681 break;
682 case CLOCK_EVT_MODE_PERIODIC:
683 BUG();
684 case CLOCK_EVT_MODE_RESUME:
685 break;
/* This describes our primitive timer chip. */
static struct clock_event_device lguest_clockevent = {
	.name			= "lguest",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event		= lguest_clockevent_set_next_event,
	.set_mode		= lguest_clockevent_set_mode,
	.rating			= INT_MAX,
	.mult			= 1,
	.shift			= 0,
	.min_delta_ns		= LG_CLOCK_MIN_DELTA,
	.max_delta_ns		= LG_CLOCK_MAX_DELTA,
};

/* This is the Guest timer interrupt handler (hardware interrupt 0).  We just
 * call the clockevent infrastructure and it does whatever needs doing. */
static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned long flags;

	/* Don't interrupt us while this is running. */
	local_irq_save(flags);
	lguest_clockevent.event_handler(&lguest_clockevent);
	local_irq_restore(flags);
}
/* At some point in the boot process, we get asked to set up our timing
 * infrastructure.  The kernel doesn't expect timer interrupts before this, but
 * we cleverly initialized the "blocked_interrupts" field of "struct
 * lguest_data" so that timer interrupts were blocked until now. */
static void lguest_time_init(void)
{
	/* Set up the timer interrupt (0) to go to our simple timer routine */
	set_irq_handler(0, lguest_time_irq);

	/* Our clock structure looks like arch/x86/kernel/tsc_32.c if we can
	 * use the TSC, otherwise it's a dumb nanosecond-resolution clock.
	 * Either way, the "rating" is set so high that it's always chosen over
	 * any other clocksource. */
	if (lguest_data.tsc_khz)
		lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz,
							 lguest_clock.shift);
	clock_base = lguest_clock_read();
	clocksource_register(&lguest_clock);

	/* Now we've set up our clock, we can use it as the scheduler clock */
	pv_time_ops.sched_clock = lguest_sched_clock;

	/* We can't set cpumask in the initializer: damn C limitations!  Set it
	 * here and register our timer device. */
	lguest_clockevent.cpumask = cpumask_of_cpu(0);
	clockevents_register_device(&lguest_clockevent);

	/* Finally, we unblock the timer interrupt. */
	enable_lguest_irq(0);
}
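
/* For reference (an added note, not original text): clocksource_khz2mult()
 * picks mult so that (cycles * mult) >> shift ~= cycles * 1000000 / khz,
 * i.e. nanoseconds for a counter ticking at tsc_khz kHz.  With shift = 22
 * and a 3 GHz TSC (tsc_khz = 3000000), mult comes out near (1 << 22) / 3. */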
/*
 * Miscellaneous bits and pieces.
 *
 * Here is an oddball collection of functions which the Guest needs for things
 * to work.  They're pretty simple.
 */

/* The Guest needs to tell the Host what stack it expects traps to use.  For
 * native hardware, this is part of the Task State Segment mentioned above in
 * lguest_load_tr_desc(), but to help hypervisors there's this special call.
 *
 * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
 * segment), the privilege level (we're privilege level 1, the Host is 0 and
 * will not tolerate us trying to use that), the stack pointer, and the number
 * of pages in the stack. */
static void lguest_load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->sp0,
		   THREAD_SIZE/PAGE_SIZE);
}
/* Let's just say, I wouldn't do debugging under a Guest. */
static void lguest_set_debugreg(int regno, unsigned long value)
{
	/* FIXME: Implement */
}

/* There are times when the kernel wants to make sure that no memory writes are
 * caught in the cache (that they've all reached real hardware devices).  This
 * doesn't matter for the Guest which has virtual hardware.
 *
 * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
 * (clflush) instruction is available and the kernel uses that.  Otherwise, it
 * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
 * Unlike clflush, wbinvd can only be run at privilege level 0.  So we can
 * ignore clflush, but replace wbinvd.
 */
static void lguest_wbinvd(void)
{
}

/* If the Guest expects to have an Advanced Programmable Interrupt Controller,
 * we play dumb by ignoring writes and returning 0 for reads.  So it's no
 * longer Programmable nor Controlling anything, and I don't think 8 lines of
 * code qualifies for Advanced.  It will also never interrupt anything.  It
 * does, however, allow us to get through the Linux boot code. */
#ifdef CONFIG_X86_LOCAL_APIC
static void lguest_apic_write(unsigned long reg, u32 v)
{
}

static u32 lguest_apic_read(unsigned long reg)
{
	return 0;
}
#endif
/* STOP!  Until an interrupt comes in. */
static void lguest_safe_halt(void)
{
	hcall(LHCALL_HALT, 0, 0, 0);
}

/* Perhaps CRASH isn't the best name for this hypercall, but we use it to get a
 * message out when we're crashing as well as elegant termination like powering
 * off.
 *
 * Note that the Host always prefers that the Guest speak in physical addresses
 * rather than virtual addresses, so we use __pa() here. */
static void lguest_power_off(void)
{
	hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0);
}

/*
 * Panicking.
 *
 * Don't.  But if you did, this is what happens.
 */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
	hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0);
	/* The hcall won't return, but to keep gcc happy, we're "done". */
	return NOTIFY_DONE;
}

static struct notifier_block paniced = {
	.notifier_call = lguest_panic
};
/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
	/* We do this here and not earlier because lockcheck barfs if we do it
	 * before start_kernel() */
	atomic_notifier_chain_register(&panic_notifier_list, &paniced);

	/* The Linux bootloader header contains an "e820" memory map: the
	 * Launcher populated the first entry with our memory limit. */
	add_memory_region(boot_params.e820_map[0].addr,
			  boot_params.e820_map[0].size,
			  boot_params.e820_map[0].type);

	/* This string is for the boot messages. */
	return "LGUEST";
}
/* We will eventually use the virtio console device to produce console output,
 * but before that is set up we use LHCALL_NOTIFY on normal memory to produce
 * console output. */
static __init int early_put_chars(u32 vtermno, const char *buf, int count)
{
	char scratch[17];
	unsigned int len = count;

	/* We use a nul-terminated string, so we have to make a copy.  Icky,
	 * huh? */
	if (len > sizeof(scratch) - 1)
		len = sizeof(scratch) - 1;
	scratch[len] = '\0';
	memcpy(scratch, buf, len);
	hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0);

	/* This routine returns the number of bytes actually written. */
	return len;
}
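
/* An illustrative (hypothetical) use: the hvc console layer feeds this
 * function in chunks and retries on short writes, so a long line simply
 * costs several hypercalls -- at most 16 bytes of payload each time:
 *
 *	early_put_chars(0, "Booting Guest...\n", 17);	 returns 16; the
 *							 caller resends the
 *							 remaining byte
 */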
/*G:050
 * Patching (Powerfully Placating Performance Pedants)
 *
 * We have already seen that pv_ops structures let us replace simple
 * native instructions with calls to the appropriate back end all throughout
 * the kernel.  This allows the same kernel to run as a Guest and as a native
 * kernel, but it's slow because of all the indirect branches.
 *
 * Remember that David Wheeler quote about "Any problem in computer science can
 * be solved with another layer of indirection"?  The rest of that quote is
 * "... But that usually will create another problem."  This is the first of
 * those problems.
 *
 * Our current solution is to allow the paravirt back end to optionally patch
 * over the indirect calls to replace them with something more efficient.  We
 * patch the four most commonly called functions: disable interrupts, enable
 * interrupts, restore interrupts and save interrupts.  We usually have 6 or 10
 * bytes to patch into: the Guest versions of these operations are small enough
 * that we can fit comfortably.
 *
 * First we need assembly templates of each of the patchable Guest operations,
 * and these are in lguest_asm.S. */

/*G:060 We construct a table from the assembler templates: */
static const struct lguest_insns
{
	const char *start, *end;
} lguest_insns[] = {
	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
	[PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
	[PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
};
/* Now our patch routine is fairly simple (based on the native one in
 * paravirt.c).  If we have a replacement, we copy it in and return how much of
 * the available space we used. */
static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
			     unsigned long addr, unsigned len)
{
	unsigned int insn_len;

	/* Don't do anything special if we don't have a replacement */
	if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	insn_len = lguest_insns[type].end - lguest_insns[type].start;

	/* Similarly if we can't fit replacement (shouldn't happen, but let's
	 * be thorough). */
	if (len < insn_len)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	/* Copy in our instructions. */
	memcpy(ibuf, lguest_insns[type].start, insn_len);
	return insn_len;
}
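
/* To make the payoff concrete, a sketch (illustrative, not real disassembly)
 * of a single "interrupts off" call site before and after patching:
 *
 *	before:	an indirect call through pv_irq_ops.irq_disable
 *	after:	the lgstart_cli..lgend_cli template copied inline, i.e. a
 *		single "movl $0, lguest_data+irq_enabled"-style store
 *
 * The actual template bytes live in the assembler file mentioned above; the
 * point is that a patched site runs straight-line code with no call at all. */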
static void lguest_restart(char *reason)
{
	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0);
}

/*G:030 Once we get to lguest_init(), we know we're a Guest.  The pv_ops
 * structures in the kernel provide points for (almost) every routine we have
 * to override to avoid privileged instructions. */
__init void lguest_init(void)
{
	/* We're under lguest, paravirt is enabled, and we're running at
	 * privilege level 1, not 0 as normal. */
	pv_info.name = "lguest";
	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = 1;

	/* We set up all the lguest overrides for sensitive operations.  These
	 * are detailed with the operations themselves. */

	/* interrupt-related operations */
	pv_irq_ops.init_IRQ = lguest_init_IRQ;
	pv_irq_ops.save_fl = save_fl;
	pv_irq_ops.restore_fl = restore_fl;
	pv_irq_ops.irq_disable = irq_disable;
	pv_irq_ops.irq_enable = irq_enable;
	pv_irq_ops.safe_halt = lguest_safe_halt;

	/* init-time operations */
	pv_init_ops.memory_setup = lguest_memory_setup;
	pv_init_ops.patch = lguest_patch;

	/* Intercepts of various cpu instructions */
	pv_cpu_ops.load_gdt = lguest_load_gdt;
	pv_cpu_ops.cpuid = lguest_cpuid;
	pv_cpu_ops.load_idt = lguest_load_idt;
	pv_cpu_ops.iret = lguest_iret;
	pv_cpu_ops.load_sp0 = lguest_load_sp0;
	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
	pv_cpu_ops.set_ldt = lguest_set_ldt;
	pv_cpu_ops.load_tls = lguest_load_tls;
	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
	pv_cpu_ops.clts = lguest_clts;
	pv_cpu_ops.read_cr0 = lguest_read_cr0;
	pv_cpu_ops.write_cr0 = lguest_write_cr0;
	pv_cpu_ops.read_cr4 = lguest_read_cr4;
	pv_cpu_ops.write_cr4 = lguest_write_cr4;
	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
	pv_cpu_ops.wbinvd = lguest_wbinvd;
	pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
	pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;

	/* pagetable management */
	pv_mmu_ops.write_cr3 = lguest_write_cr3;
	pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
	pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
	pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
	pv_mmu_ops.set_pte = lguest_set_pte;
	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
	pv_mmu_ops.set_pmd = lguest_set_pmd;
	pv_mmu_ops.read_cr2 = lguest_read_cr2;
	pv_mmu_ops.read_cr3 = lguest_read_cr3;
	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;

#ifdef CONFIG_X86_LOCAL_APIC
	/* apic read/write intercepts */
	pv_apic_ops.apic_write = lguest_apic_write;
	pv_apic_ops.apic_write_atomic = lguest_apic_write;
	pv_apic_ops.apic_read = lguest_apic_read;
#endif

	/* time operations */
	pv_time_ops.get_wallclock = lguest_get_wallclock;
	pv_time_ops.time_init = lguest_time_init;

	/* Now is a good time to look at the implementations of these functions
	 * before returning to the rest of lguest_init(). */
	/*G:070 Now we've seen all the paravirt_ops, we return to
	 * lguest_init() where the rest of the fairly chaotic boot setup
	 * occurs. */

	/* The native boot code sets up initial page tables immediately after
	 * the kernel itself, and sets init_pg_tables_end so they're not
	 * clobbered.  The Launcher places our initial pagetables somewhere at
	 * the top of our physical memory, so we don't need extra space: set
	 * init_pg_tables_end to the end of the kernel. */
	init_pg_tables_end = __pa(pg0);

	/* Load the %fs segment register (the per-cpu segment register) with
	 * the normal data segment to get through booting. */
	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");

	/* The Host uses the top of the Guest's virtual address space for the
	 * Host<->Guest Switcher, and it tells us how big that is in
	 * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */
	reserve_top_address(lguest_data.reserve_mem);

	/* If we don't initialize the lock dependency checker now, it crashes
	 * paravirt_disable_iospace. */
	lockdep_init();

	/* The IDE code spends about 3 seconds probing for disks: if we reserve
	 * all the I/O ports up front it can't get them and so doesn't probe.
	 * Other device drivers are similar (but less severe).  This cuts the
	 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */
	paravirt_disable_iospace();

	/* This is messy CPU setup stuff which the native boot code does before
	 * start_kernel, so we have to do it, too: */
	cpu_detect(&new_cpu_data);
	/* head.S usually sets up the first capability word, so do it here. */
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Math is always hard! */
	new_cpu_data.hard_math = 1;

#ifdef CONFIG_X86_MCE
	mce_disabled = 1;
#endif
#ifdef CONFIG_ACPI
	acpi_disabled = 1;
	acpi_ht = 0;
#endif

	/* We set the preferred console to "hvc".  This is the "hypervisor
	 * virtual console" driver written by the PowerPC people, which we also
	 * adapted for lguest's use. */
	add_preferred_console("hvc", 0, NULL);

	/* Register our very early console. */
	virtio_cons_early_init(early_put_chars);

	/* Last of all, we set the power management poweroff hook to point to
	 * the Guest routine to power off. */
	pm_power_off = lguest_power_off;

	machine_ops.restart = lguest_restart;

	/* Now we're set up, call start_kernel() in init/main.c and we proceed
	 * to boot as normal.  It never returns. */
	start_kernel();
}

/*
 * This marks the end of stage II of our journey, The Guest.
 *
 * It is now time for us to explore the layer of virtual drivers and complete
 * our understanding of the Guest in "make Drivers".
 */