/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"
/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
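
/*
 * Illustrative arithmetic, not from the original source: on 64-bit,
 * sizeof(struct page) is commonly around 64 bytes, so at the 10:1
 * limit each 4096-byte base page carries roughly 10 * 64 = 640 bytes
 * of page structures for the extra memory, i.e. about 16% of base
 * memory, leaving most of the base allocation usable.
 */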

static __init void xen_add_extra_mem(unsigned long pages)
{
	u64 size = (u64)pages * PAGE_SIZE;
	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;

	if (!pages)
		return;

	e820_add_region(extra_start, size, E820_RAM);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");

	xen_extra_mem_size += size;

	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
}
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
					      phys_addr_t end_addr)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long start, end;
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	start = PFN_UP(start_addr);
	end = PFN_DOWN(end_addr);
	if (end <= start)
		return 0;

	printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
	       start, end);
	for (pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;

		/* Hand the backing machine frame back to the hypervisor. */
		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
		     start, end, ret);
		if (ret == 1) {
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	printk(KERN_CONT "%ld pages freed\n", len);

	return len;
}
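
/*
 * Rounding sketch with hypothetical addresses: PFN_UP/PFN_DOWN keep the
 * release conservative, so only pages lying wholly inside the range go
 * back to the hypervisor.  For start_addr = 0x100800 and end_addr =
 * 0x1ff800, start = PFN_UP(0x100800) = 0x101 and end =
 * PFN_DOWN(0x1ff800) = 0x1ff, trimming the partial page at each end.
 */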
static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
						     const struct e820map *e820)
{
	phys_addr_t max_addr = PFN_PHYS(max_pfn);
	phys_addr_t last_end = ISA_END_ADDRESS;
	unsigned long released = 0;
	int i;

	/* Free any unused memory above the low 1Mbyte. */
	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
		phys_addr_t end = e820->map[i].addr;
		end = min(max_addr, end);

		if (last_end < end)
			released += xen_release_chunk(last_end, end);
		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
	}

	if (last_end < max_addr)
		released += xen_release_chunk(last_end, max_addr);

	printk(KERN_INFO "released %ld pages of unused memory\n", released);

	return released;
}
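
/*
 * Illustrative walk over a hypothetical map: with RAM entries
 * [0, 0x9e000) and [0x200000, 0x10000000) and max_addr = 0x20000000,
 * the gap before the first entry lies below last_end (ISA_END_ADDRESS
 * = 0x100000), so nothing is released there; the hole
 * [0x100000, 0x200000) before the second entry is released; and after
 * the loop the tail [0x10000000, 0x20000000) past the last entry is
 * released as well.
 */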
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		/* No hypervisor-provided map: fake a single RAM entry. */
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	e820.nr_map = 0;
	xen_extra_mem_start = mem_end;
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end = map[i].addr + map[i].size;

		if (map[i].type == E820_RAM && end > mem_end) {
			/* RAM off the end - may be partially included */
			u64 delta = min(map[i].size, end - mem_end);

			map[i].size -= delta;
			end -= delta;

			extra_pages += PFN_DOWN(delta);
		}

		if (map[i].size > 0 && end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* Add region if any remains */
		if (map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}
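
	/*
	 * Worked example with hypothetical numbers: with mem_end =
	 * 0x20000000 and a RAM entry [0x1f000000, size 0x2000000), end =
	 * 0x21000000 overshoots mem_end, so delta = min(0x2000000,
	 * 0x1000000) = 0x1000000; the entry is trimmed to 16MB and
	 * extra_pages grows by PFN_DOWN(0x1000000) = 4096 pages.
	 */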

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
		      __pa(xen_start_info->pt_base),
			"XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;
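
	/*
	 * Worked example with hypothetical numbers: a 512MB domain
	 * (max_pfn = 0x20000) with 0x300000 releasable extra pages clamps
	 * to extra_limit = min(10 * 0x20000, 0x20000 + 0x300000) =
	 * 0x140000, so extra_pages becomes 0x140000 - 0x20000 = 0x120000
	 * pages (about 4.5GB), nine times the base allocation.
	 */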

	xen_add_extra_mem(extra_pages);

	return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;

	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

static __cpuinit int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void __cpuinit xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
	boot_cpu_data.hlt_works_ok = 1;
#endif
	pm_idle = default_idle;

	fiddle_vdso();
}