/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
phys_addr_t xen_extra_mem_start, xen_extra_mem_size;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
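
/*
 * Illustrative arithmetic (values not from the original source): with
 * 4k pages and assuming a 64-byte struct page, the page structures
 * cost 64/4096 (about 1.6%) of the extra memory they describe, so a
 * 10x extra:base ratio spends at most roughly 16% of base memory on
 * them; e.g. a guest booted with 512MB of base memory may be offered
 * at most 10 * 512MB = 5GB of extra space.
 */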

static void __init xen_add_extra_mem(unsigned long pages)
{
	unsigned long pfn;

	u64 size = (u64)pages * PAGE_SIZE;
	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;

	if (!pages)
		return;

	e820_add_region(extra_start, size, E820_RAM);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");

	xen_extra_mem_size += size;

	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);

	for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
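
/*
 * Hand the RAM frames in [start_addr, end_addr) back to the hypervisor
 * one page at a time via XENMEM_decrease_reservation, marking each
 * released pfn invalid in the p2m.  Returns the number of pages freed.
 */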
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
					      phys_addr_t end_addr)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long start, end;
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	start = PFN_UP(start_addr);
	end = PFN_DOWN(end_addr);

	if (end <= start)
		return 0;

	printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
	       start, end);
	for (pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
		     start, end, ret);
		if (ret == 1) {
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	printk(KERN_CONT "%ld pages freed\n", len);

	return len;
}
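
/*
 * Scan the e820 map and release every page above the low 1MB that
 * falls into a hole between map entries (or beyond the last entry),
 * up to max_pfn.  Returns the total number of pages given back.
 */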
static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
						     const struct e820map *e820)
{
	phys_addr_t max_addr = PFN_PHYS(max_pfn);
	phys_addr_t last_end = ISA_END_ADDRESS;
	unsigned long released = 0;
	int i;

	/* Free any unused memory above the low 1Mbyte. */
	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
		phys_addr_t end = e820->map[i].addr;
		end = min(max_addr, end);

		if (last_end < end)
			released += xen_release_chunk(last_end, end);
		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
	}

	if (last_end < max_addr)
		released += xen_release_chunk(last_end, max_addr);

	printk(KERN_INFO "released %ld pages of unused memory\n", released);

	return released;
}
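
/*
 * Mark the non-RAM, non-unusable ranges of the (raw) e820 map, and the
 * holes between its entries, as identity p2m entries (pfn == mfn), so
 * that accesses to MMIO regions and, in dom0, the legacy/PCI space go
 * straight to the machine address.  Returns the number of pfns set.
 */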
static unsigned long __init xen_set_identity(const struct e820entry *list,
					     ssize_t map_size)
{
	phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
	phys_addr_t start_pci = last;
	const struct e820entry *entry;
	unsigned long identity = 0;
	int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t start = entry->addr;
		phys_addr_t end = start + entry->size;

		if (start < last)
			start = last;

		if (end <= start)
			continue;

		/* Skip over the 1MB region. */
		if (last > end)
			continue;

		if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
			if (start > start_pci)
				identity += set_phys_range_identity(
						PFN_UP(start_pci), PFN_DOWN(start));

			/* Without saving 'last' we would gobble RAM too
			 * at the end of the loop. */
			last = end;
			start_pci = end;
			continue;
		}
		start_pci = min(start, start_pci);
		last = end;
	}
	if (last > start_pci)
		identity += set_phys_range_identity(
					PFN_UP(start_pci), PFN_DOWN(last));

	return identity;
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;
	static struct e820entry map_raw[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	unsigned long identity_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
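	/*
	 * Dom0 asked the hypervisor for the real (machine) E820 above,
	 * while a domU received its pseudo-physical map.  Hypervisors
	 * lacking the hypercall return -ENOSYS, in which case we fall
	 * back to one synthesized RAM entry for the initial allocation.
	 */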
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	memcpy(map_raw, map, sizeof(map));
	e820.nr_map = 0;
#ifdef CONFIG_X86_32
	xen_extra_mem_start = mem_end;
#else
	xen_extra_mem_start = max((1ULL << 32), mem_end);
#endif
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end;

		/* Guard against non-page aligned E820 entries. */
		if (map[i].type == E820_RAM)
			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
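		/*
		 * Illustrative example (values made up): a RAM entry at
		 * addr 0x1000 with size 0x2801 ends at 0x3801; the guard
		 * trims 0x3801 % 0x1000 = 0x801 bytes, leaving the
		 * page-aligned range [0x1000, 0x3000).
		 */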

		end = map[i].addr + map[i].size;
		if (map[i].type == E820_RAM && end > mem_end) {
			/* RAM off the end - may be partially included */
			u64 delta = min(map[i].size, end - mem_end);

			map[i].size -= delta;
			end -= delta;

			extra_pages += PFN_DOWN(delta);
			/*
			 * Set RAM below 4GB that is not for us to be unusable.
			 * This prevents "System RAM" address space from being
			 * used as potential resource for I/O address (happens
			 * when 'allocate_resource' is called).
			 */
			if (delta &&
				(xen_initial_domain() && end < 0x100000000ULL))
				e820_add_region(end, delta, E820_UNUSABLE);
		}

		if (map[i].size > 0 && end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* Add region if any remains */
		if (map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
				   __pa(xen_start_info->pt_base),
				   "XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;
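
	/*
	 * Illustrative numbers (not from the original source): a 64-bit
	 * domain booted with 512MB has max_pfn = 0x20000 4k pages; with
	 * plenty of releasable memory, extra_limit = min(10 * 0x20000,
	 * max_pfn + extra_pages) = 0x140000, so extra_pages is clamped
	 * to 0x120000 pages, i.e. about 4.5GB of extra space.
	 */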

	xen_add_extra_mem(extra_pages);

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs. We supply it with the non-sanitized version
	 * of the E820.
	 */
	identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;

	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

static int __cpuinit register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
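
/*
 * The enable helpers below use register_callback() to point the
 * hypervisor's fast system call upcalls at the entry.S stubs declared
 * at the top of this file, clearing the corresponding CPU feature bit
 * when registration fails so the instruction is never advertised.
 */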
void __cpuinit xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
	boot_cpu_data.hlt_works_ok = 1;
#endif
	pm_idle = default_idle;
	boot_option_idle_override = IDLE_HALT;

	fiddle_vdso();
}