2 #include <xen/events.h>
3 #include <xen/grant_table.h>
5 #include <xen/interface/vcpu.h>
6 #include <xen/interface/xen.h>
7 #include <xen/interface/memory.h>
8 #include <xen/interface/hvm/params.h>
9 #include <xen/features.h>
10 #include <xen/platform_pci.h>
11 #include <xen/xenbus.h>
13 #include <xen/interface/sched.h>
14 #include <xen/xen-ops.h>
15 #include <asm/xen/hypervisor.h>
16 #include <asm/xen/hypercall.h>
17 #include <asm/system_misc.h>
18 #include <linux/interrupt.h>
19 #include <linux/irqreturn.h>
20 #include <linux/module.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_address.h>
24 #include <linux/cpuidle.h>
25 #include <linux/cpufreq.h>
/*
 * Dummy start_info: ARM guests are always started via device tree, so a
 * static, zero-filled start_info is provided to keep common Xen code
 * (which dereferences xen_start_info) working.
 */
struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

/* Starts as XEN_NATIVE; switched to XEN_HVM_DOMAIN in xen_guest_init(). */
enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

/*
 * Placeholder shared info, replaced with the real hypervisor-mapped page
 * in xen_guest_init() once XENMEM_add_to_physmap succeeds.
 */
struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

/* Per-CPU pointer to this CPU's vcpu_info, registered in xen_percpu_init(). */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
/* Backing per-CPU storage for the vcpu_info structures themselves. */
static struct vcpu_info __percpu *xen_vcpu_info;

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

/* Event-channel upcall IRQ, parsed from the device tree; -1 until found. */
static __read_mostly int xen_events_irq = -1;
55 /* map fgmfn of domid to lpfn in the current domain */
56 static int map_foreign_page(unsigned long lpfn
, unsigned long fgmfn
,
60 struct xen_add_to_physmap_range xatp
= {
62 .foreign_domid
= domid
,
64 .space
= XENMAPSPACE_gmfn_foreign
,
66 xen_ulong_t idx
= fgmfn
;
67 xen_pfn_t gpfn
= lpfn
;
70 set_xen_guest_handle(xatp
.idxs
, &idx
);
71 set_xen_guest_handle(xatp
.gpfns
, &gpfn
);
72 set_xen_guest_handle(xatp
.errs
, &err
);
74 rc
= HYPERVISOR_memory_op(XENMEM_add_to_physmap_range
, &xatp
);
76 pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
77 rc
, err
, lpfn
, fgmfn
);
84 xen_pfn_t fgmfn
; /* foreign domain's gmfn */
87 struct vm_area_struct
*vma
;
90 struct xen_remap_mfn_info
*info
;
93 static int remap_pte_fn(pte_t
*ptep
, pgtable_t token
, unsigned long addr
,
96 struct remap_data
*info
= data
;
97 struct page
*page
= info
->pages
[info
->index
++];
98 unsigned long pfn
= page_to_pfn(page
);
99 pte_t pte
= pfn_pte(pfn
, info
->prot
);
101 if (map_foreign_page(pfn
, info
->fgmfn
, info
->domid
))
103 set_pte_at(info
->vma
->vm_mm
, addr
, ptep
, pte
);
108 int xen_remap_domain_mfn_range(struct vm_area_struct
*vma
,
110 xen_pfn_t mfn
, int nr
,
111 pgprot_t prot
, unsigned domid
,
115 struct remap_data data
;
117 /* TBD: Batching, current sole caller only does page at a time */
127 err
= apply_to_page_range(vma
->vm_mm
, addr
, nr
<< PAGE_SHIFT
,
128 remap_pte_fn
, &data
);
131 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range
);
133 int xen_unmap_domain_mfn_range(struct vm_area_struct
*vma
,
134 int nr
, struct page
**pages
)
138 for (i
= 0; i
< nr
; i
++) {
139 struct xen_remove_from_physmap xrp
;
140 unsigned long rc
, pfn
;
142 pfn
= page_to_pfn(pages
[i
]);
144 xrp
.domid
= DOMID_SELF
;
146 rc
= HYPERVISOR_memory_op(XENMEM_remove_from_physmap
, &xrp
);
148 pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
155 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range
);
157 static void __init
xen_percpu_init(void *unused
)
159 struct vcpu_register_vcpu_info info
;
160 struct vcpu_info
*vcpup
;
164 pr_info("Xen: initializing cpu%d\n", cpu
);
165 vcpup
= per_cpu_ptr(xen_vcpu_info
, cpu
);
167 info
.mfn
= __pa(vcpup
) >> PAGE_SHIFT
;
168 info
.offset
= offset_in_page(vcpup
);
170 err
= HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info
, cpu
, &info
);
172 per_cpu(xen_vcpu
, cpu
) = vcpup
;
174 enable_percpu_irq(xen_events_irq
, 0);
178 static void xen_restart(enum reboot_mode reboot_mode
, const char *cmd
)
180 struct sched_shutdown r
= { .reason
= SHUTDOWN_reboot
};
182 rc
= HYPERVISOR_sched_op(SCHEDOP_shutdown
, &r
);
187 static void xen_power_off(void)
189 struct sched_shutdown r
= { .reason
= SHUTDOWN_poweroff
};
191 rc
= HYPERVISOR_sched_op(SCHEDOP_shutdown
, &r
);
/*
 * see Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
200 #define GRANT_TABLE_PHYSADDR 0
201 static int __init
xen_guest_init(void)
203 struct xen_add_to_physmap xatp
;
204 static struct shared_info
*shared_info_page
= 0;
205 struct device_node
*node
;
207 const char *s
= NULL
;
208 const char *version
= NULL
;
209 const char *xen_prefix
= "xen,xen-";
212 node
= of_find_compatible_node(NULL
, NULL
, "xen,xen");
214 pr_debug("No Xen support\n");
217 s
= of_get_property(node
, "compatible", &len
);
218 if (strlen(xen_prefix
) + 3 < len
&&
219 !strncmp(xen_prefix
, s
, strlen(xen_prefix
)))
220 version
= s
+ strlen(xen_prefix
);
221 if (version
== NULL
) {
222 pr_debug("Xen version not found\n");
225 if (of_address_to_resource(node
, GRANT_TABLE_PHYSADDR
, &res
))
227 xen_hvm_resume_frames
= res
.start
>> PAGE_SHIFT
;
228 xen_events_irq
= irq_of_parse_and_map(node
, 0);
229 pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
230 version
, xen_events_irq
, xen_hvm_resume_frames
);
231 xen_domain_type
= XEN_HVM_DOMAIN
;
233 xen_setup_features();
234 if (xen_feature(XENFEAT_dom0
))
235 xen_start_info
->flags
|= SIF_INITDOMAIN
|SIF_PRIVILEGED
;
237 xen_start_info
->flags
&= ~(SIF_INITDOMAIN
|SIF_PRIVILEGED
);
239 if (!shared_info_page
)
240 shared_info_page
= (struct shared_info
*)
241 get_zeroed_page(GFP_KERNEL
);
242 if (!shared_info_page
) {
243 pr_err("not enough memory\n");
246 xatp
.domid
= DOMID_SELF
;
248 xatp
.space
= XENMAPSPACE_shared_info
;
249 xatp
.gpfn
= __pa(shared_info_page
) >> PAGE_SHIFT
;
250 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap
, &xatp
))
253 HYPERVISOR_shared_info
= (struct shared_info
*)shared_info_page
;
255 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
256 * page, we use it in the event channel upcall and in some pvclock
258 * The shared info contains exactly 1 CPU (the boot CPU). The guest
259 * is required to use VCPUOP_register_vcpu_info to place vcpu info
260 * for secondary CPUs as they are brought up.
261 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
263 xen_vcpu_info
= __alloc_percpu(sizeof(struct vcpu_info
),
264 sizeof(struct vcpu_info
));
265 if (xen_vcpu_info
== NULL
)
269 if (!xen_initial_domain())
273 * Making sure board specific code will not set up ops for
274 * cpu idle and cpu freq.
281 core_initcall(xen_guest_init
);
283 static int __init
xen_pm_init(void)
288 pm_power_off
= xen_power_off
;
289 arm_pm_restart
= xen_restart
;
293 late_initcall(xen_pm_init
);
295 static irqreturn_t
xen_arm_callback(int irq
, void *arg
)
297 xen_hvm_evtchn_do_upcall();
301 static int __init
xen_init_events(void)
303 if (!xen_domain() || xen_events_irq
< 0)
308 if (request_percpu_irq(xen_events_irq
, xen_arm_callback
,
309 "events", &xen_vcpu
)) {
310 pr_err("Error requesting IRQ %d\n", xen_events_irq
);
314 on_each_cpu(xen_percpu_init
, NULL
, 0);
318 postcore_initcall(xen_init_events
);
/* In the hypervisor.S file. */
/* Hypercall trampolines implemented in assembly; exported for modules. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(privcmd_call);