/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"
/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
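/*
 * For example, a domU started with 512MB (max_pfn = 0x20000 pages) is
 * clamped by xen_memory_setup() below to at most
 * min(10 * 0x20000, max_pfn + extra_pages) pfns of base plus extra memory.
 */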
static void __init xen_add_extra_mem(unsigned long pages)
{
        unsigned long pfn;

        u64 size = (u64)pages * PAGE_SIZE;
        u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;

        if (!pages)
                return;

        e820_add_region(extra_start, size, E820_RAM);
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");

        xen_extra_mem_size += size;

        xen_max_p2m_pfn = PFN_DOWN(extra_start + size);

        for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
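/*
 * Hand back the whole pages contained in [start_addr, end_addr) to the
 * hypervisor via XENMEM_decrease_reservation, marking each released pfn
 * INVALID_P2M_ENTRY.  Returns the number of pages actually freed.
 */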
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
                                              phys_addr_t end_addr)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };
        unsigned long start, end;
        unsigned long len = 0;
        unsigned long pfn;
        int ret;

        start = PFN_UP(start_addr);
        end = PFN_DOWN(end_addr);

        if (end <= start)
                return 0;

        for (pfn = start; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);

                /* Make sure pfn exists to start with */
                if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
                        continue;

                set_xen_guest_handle(reservation.extent_start, &mfn);
                reservation.nr_extents = 1;

                ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                           &reservation);
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
                if (ret == 1) {
                        __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                        len++;
                }
        }
        printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
               start, end, len);

        return len;
}
static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
                                                     const struct e820map *e820)
{
        phys_addr_t max_addr = PFN_PHYS(max_pfn);
        phys_addr_t last_end = ISA_END_ADDRESS;
        unsigned long released = 0;
        int i;

        /* Free any unused memory above the low 1Mbyte. */
        for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
                phys_addr_t end = e820->map[i].addr;
                end = min(max_addr, end);

                if (last_end < end)
                        released += xen_release_chunk(last_end, end);
                last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
        }

        if (last_end < max_addr)
                released += xen_release_chunk(last_end, max_addr);

        printk(KERN_INFO "released %lu pages of unused memory\n", released);
        return released;
}
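/*
 * Mark all non-RAM/non-unusable ranges in the raw E820 (and the gaps
 * between entries) as identity (pfn == mfn) in the p2m, so that MMIO
 * and reserved space is mapped 1:1.  Returns the number of PFNs set.
 */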
static unsigned long __init xen_set_identity(const struct e820entry *list,
                                             ssize_t map_size)
{
        phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
        phys_addr_t start_pci = last;
        const struct e820entry *entry;
        unsigned long identity = 0;
        int i;

        for (i = 0, entry = list; i < map_size; i++, entry++) {
                phys_addr_t start = entry->addr;
                phys_addr_t end = start + entry->size;

                if (start < last)
                        start = last;

                if (end <= start)
                        continue;

                /* Skip over the 1MB region. */
                if (last > end)
                        continue;

                if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
                        if (start > start_pci)
                                identity += set_phys_range_identity(
                                                PFN_UP(start_pci), PFN_DOWN(start));

                        /* Without saving 'last' we would gobble RAM too
                         * at the end of the loop. */
                        last = end;
                        start_pci = end;
                        continue;
                }
                start_pci = min(start, start_pci);
                last = end;
        }
        if (last > start_pci)
                identity += set_phys_range_identity(
                                PFN_UP(start_pci), PFN_DOWN(last));
        return identity;
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
        static struct e820entry map[E820MAX] __initdata;
        static struct e820entry map_raw[E820MAX] __initdata;

        unsigned long max_pfn = xen_start_info->nr_pages;
        unsigned long long mem_end;
        int rc;
        struct xen_memory_map memmap;
        unsigned long extra_pages = 0;
        unsigned long extra_limit;
        unsigned long identity_pages = 0;
        int i;
        int op;

        max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
        mem_end = PFN_PHYS(max_pfn);

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, map);
        op = xen_initial_domain() ?
                XENMEM_machine_memory_map :
                XENMEM_memory_map;
        rc = HYPERVISOR_memory_op(op, &memmap);
        if (rc == -ENOSYS) {
                /* No E820 from the hypervisor: fake a flat map from nr_pages. */
                BUG_ON(xen_initial_domain());
                memmap.nr_entries = 1;
                map[0].addr = 0ULL;
                map[0].size = mem_end;
                /* 8MB slack (to balance backend allocations). */
                map[0].size += 8ULL << 20;
                map[0].type = E820_RAM;
                rc = 0;
        }
        BUG_ON(rc);

        memcpy(map_raw, map, sizeof(map));
        e820.nr_map = 0;
        xen_extra_mem_start = mem_end;
        for (i = 0; i < memmap.nr_entries; i++) {
                unsigned long long end;

                /* Guard against non-page aligned E820 entries. */
                if (map[i].type == E820_RAM)
                        map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;

                end = map[i].addr + map[i].size;
                if (map[i].type == E820_RAM && end > mem_end) {
                        /* RAM off the end - may be partially included */
                        u64 delta = min(map[i].size, end - mem_end);

                        map[i].size -= delta;
                        end -= delta;

                        extra_pages += PFN_DOWN(delta);
                        /*
                         * Set RAM below 4GB that is not for us to be unusable.
                         * This prevents "System RAM" address space from being
                         * used as potential resource for I/O address (happens
                         * when 'allocate_resource' is called).
                         */
                        if (delta &&
                            (xen_initial_domain() && end < 0x100000000ULL))
                                e820_add_region(end, delta, E820_UNUSABLE);
                }

                if (map[i].size > 0 && end > xen_extra_mem_start)
                        xen_extra_mem_start = end;

                /* Add region if any remains */
                if (map[i].size > 0)
                        e820_add_region(map[i].addr, map[i].size, map[i].type);
        }
        /* Align the balloon area so that max_low_pfn does not get set
         * to be at the _end_ of the PCI gap at the far end (fee01000).
         * Note that xen_extra_mem_start gets set in the loop above to be
         * past the last E820 region. */
        if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
                xen_extra_mem_start = (1ULL<<32);

        /*
         * In domU, the ISA region is normal, usable memory, but we
         * reserve ISA memory anyway because too many things poke
         * about in there.
         *
         * In Dom0, the host E820 information can leave gaps in the
         * ISA range, which would cause us to release those pages.  To
         * avoid this, we unconditionally reserve them here.
         */
        e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
                        E820_RESERVED);

        /*
         * Reserve Xen bits:
         *  - mfn_list
         *  - xen_start_info
         * See comment above "struct start_info" in <xen/interface/xen.h>
         */
        memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
                                   __pa(xen_start_info->pt_base),
                                   "XEN START INFO");

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
        /*
         * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
         * factor of the base size.  On non-highmem systems, the base
         * size is the full initial memory allocation; on highmem it
         * is limited to the max size of lowmem, so that it doesn't
         * get completely filled.
         *
         * In principle there could be a problem in lowmem systems if
         * the initial memory is also very large with respect to
         * lowmem, but we won't try to deal with that here.
         */
        extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                          max_pfn + extra_pages);

        if (extra_limit >= max_pfn)
                extra_pages = extra_limit - max_pfn;
        else
                extra_pages = 0;

        xen_add_extra_mem(extra_pages);

        /*
         * Set P2M for all non-RAM pages and E820 gaps to be identity
         * type PFNs.  We supply it with the non-sanitized version
         * of the E820.
         */
        identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
        printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);

        return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
        u32 *mask;

        mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
        mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
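/*
 * Register a single guest callback (event, failsafe, sysenter, syscall)
 * with the hypervisor; CALLBACKF_mask_events masks event delivery while
 * the callback runs.
 */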
static int __cpuinit register_callback(unsigned type, const void *func)
{
        struct callback_register callback = {
                .type = type,
                .address = XEN_CALLBACK(__KERNEL_CS, func),
                .flags = CALLBACKF_mask_events,
        };

        return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
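/*
 * Route the sysenter fast-system-call path through Xen.  If the
 * hypervisor rejects the callback, clear the CPU feature so the
 * sysenter path is never advertised.
 */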
void __cpuinit xen_enable_sysenter(void)
{
        int ret;
        unsigned sysenter_feature;

#ifdef CONFIG_X86_32
        sysenter_feature = X86_FEATURE_SEP;
#else
        sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

        if (!boot_cpu_has(sysenter_feature))
                return;

        ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
        if (ret != 0)
                setup_clear_cpu_cap(sysenter_feature);
}
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
        int ret;

        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
                printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
                /* Pretty fatal; 64-bit userspace has no other
                   mechanism for syscalls. */
        }

        if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
                ret = register_callback(CALLBACKTYPE_syscall32,
                                        xen_syscall32_target);
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
#endif /* CONFIG_X86_64 */
}
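/* Boot-time paravirt setup: VM assists, callbacks, command line and idle. */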
void __init xen_arch_setup(void)
{
        xen_panic_handler_init();

        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

        if (!xen_feature(XENFEAT_auto_translated_physmap))
                HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                     VMASST_TYPE_pae_extended_cr3);

        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();

        xen_enable_sysenter();
        xen_enable_syscall();

#ifdef CONFIG_ACPI
        if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
                disable_acpi();
        }
#endif

        memcpy(boot_command_line, xen_start_info->cmd_line,
               MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
               COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

        /* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
        boot_cpu_data.hlt_works_ok = 1;
#endif
        disable_cpuidle();
        boot_option_idle_override = IDLE_HALT;

        fiddle_vdso();
}