/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/memblock.h>

#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"
/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
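
/*
 * For example (illustrative figures, assuming 4 KiB pages): with a
 * 512 MiB base allocation, the clamp in xen_memory_setup() caps base
 * plus extra at 10 * 512 MiB = 5 GiB, i.e. at most 4.5 GiB of extra
 * e820 space.
 */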
static void __init xen_add_extra_mem(unsigned long pages)
{
	unsigned long pfn;

	u64 size = (u64)pages * PAGE_SIZE;
	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;

	if (!pages)
		return;

	e820_add_region(extra_start, size, E820_RAM);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");

	xen_extra_mem_size += size;

	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);

	/* Mark the new pages as invalid in the P2M until they are populated. */
	for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
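
/*
 * Hand a range of guest pseudo-physical pages back to the hypervisor
 * via XENMEM_decrease_reservation, one frame at a time, marking the
 * corresponding P2M entries invalid.  Returns the number of pages
 * actually released.
 */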
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
					      phys_addr_t end_addr)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long start, end;
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	start = PFN_UP(start_addr);
	end = PFN_DOWN(end_addr);

	if (end <= start)
		return 0;

	printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
	       start, end);
	for (pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
		     start, end, ret);
		if (ret == 1) {
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	printk(KERN_CONT "%ld pages freed\n", len);

	return len;
}
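
/*
 * Walk the E820 map and release every page that lies in a hole between
 * the mapped regions (above the low 1 MB) or past the final entry, up
 * to max_pfn.  Returns the total number of pages released.
 */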
static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
						     const struct e820map *e820)
{
	phys_addr_t max_addr = PFN_PHYS(max_pfn);
	phys_addr_t last_end = ISA_END_ADDRESS;
	unsigned long released = 0;
	int i;

	/* Free any unused memory above the low 1Mbyte. */
	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
		phys_addr_t end = e820->map[i].addr;
		end = min(max_addr, end);

		if (last_end < end)
			released += xen_release_chunk(last_end, end);
		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
	}

	if (last_end < max_addr)
		released += xen_release_chunk(last_end, max_addr);

	printk(KERN_INFO "released %ld pages of unused memory\n", released);

	return released;
}
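
/*
 * Set every non-RAM, non-unusable region (and the gaps between E820
 * entries) to an identity P2M mapping, so that pfn == mfn for MMIO
 * and other special ranges.  Returns the number of pages set to the
 * 1:1 mapping.
 */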
static unsigned long __init xen_set_identity(const struct e820entry *list,
					     ssize_t map_size)
{
	phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
	phys_addr_t start_pci = last;
	const struct e820entry *entry;
	unsigned long identity = 0;
	int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t start = entry->addr;
		phys_addr_t end = start + entry->size;

		if (start < last)
			start = last;

		if (end <= start)
			continue;

		/* Skip over the 1MB region. */
		if (last > end)
			continue;

		if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
			if (start > start_pci)
				identity += set_phys_range_identity(
						PFN_UP(start_pci), PFN_DOWN(start));

			/* Without saving 'last' we would gobble RAM too
			 * at the end of the loop. */
			last = end;
			start_pci = end;
			continue;
		}
		start_pci = min(start, start_pci);
		last = end;
	}
	if (last > start_pci)
		identity += set_phys_range_identity(
					PFN_UP(start_pci), PFN_DOWN(last));

	return identity;
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;
	static struct e820entry map_raw[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	unsigned long identity_pages = 0;
	int i;
	int op;
	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);
	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	memcpy(map_raw, map, sizeof(map));
	e820.nr_map = 0;
	xen_extra_mem_start = mem_end;
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end;

		/* Guard against non-page aligned E820 entries. */
		if (map[i].type == E820_RAM)
			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;

		end = map[i].addr + map[i].size;
		if (map[i].type == E820_RAM && end > mem_end) {
			/* RAM off the end - may be partially included */
			u64 delta = min(map[i].size, end - mem_end);

			map[i].size -= delta;
			end -= delta;

			extra_pages += PFN_DOWN(delta);
			/*
			 * Set RAM below 4GB that is not for us to be unusable.
			 * This prevents "System RAM" address space from being
			 * used as potential resource for I/O address (happens
			 * when 'allocate_resource' is called).
			 */
			if (delta &&
			    (xen_initial_domain() && end < 0x100000000ULL))
				e820_add_region(end, delta, E820_UNUSABLE);
		}

		if (map[i].size > 0 && end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* Add region if any remains */
		if (map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}
	/* Align the balloon area so that max_low_pfn does not get set
	 * to be at the _end_ of the PCI gap at the far end (fee01000).
	 * Note that xen_extra_mem_start gets set in the loop above to be
	 * past the last E820 region. */
	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
		xen_extra_mem_start = (1ULL<<32);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);
	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
				   __pa(xen_start_info->pt_base),
				   "XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;
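
	/*
	 * Illustrative figures (assuming 4 KiB pages and a 64-bit domU,
	 * so PFN_DOWN(MAXMEM) exceeds max_pfn): with max_pfn = 131072
	 * (512 MiB) and extra_pages = 2000000, the clamp above gives
	 * extra_limit = min(10 * 131072, 131072 + 2000000) = 1310720,
	 * so extra_pages becomes 1310720 - 131072 = 1179648 pages
	 * before being added below.
	 */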
	xen_add_extra_mem(extra_pages);
	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs. We supply it with the non-sanitized version
	 * of the E820.
	 */
	identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);

	return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;
	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
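
/*
 * Register an entry point (event, failsafe, sysenter, syscall, ...)
 * with the hypervisor so it knows where to vector into the kernel;
 * the callback runs with further event delivery masked.
 */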
static int __cpuinit register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
void __cpuinit xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
	boot_cpu_data.hlt_works_ok = 1;
#endif
	pm_idle = default_idle;
	boot_option_idle_override = IDLE_HALT;

	fiddle_vdso();
}