2 #include <xen/events.h>
3 #include <xen/grant_table.h>
5 #include <xen/interface/vcpu.h>
6 #include <xen/interface/xen.h>
7 #include <xen/interface/memory.h>
8 #include <xen/interface/hvm/params.h>
9 #include <xen/features.h>
10 #include <xen/platform_pci.h>
11 #include <xen/xenbus.h>
13 #include <xen/interface/sched.h>
14 #include <xen/xen-ops.h>
15 #include <asm/xen/hypervisor.h>
16 #include <asm/xen/hypercall.h>
17 #include <asm/system_misc.h>
18 #include <linux/interrupt.h>
19 #include <linux/irqreturn.h>
20 #include <linux/module.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_address.h>
27 struct start_info _xen_start_info
;
28 struct start_info
*xen_start_info
= &_xen_start_info
;
29 EXPORT_SYMBOL_GPL(xen_start_info
);
31 enum xen_domain_type xen_domain_type
= XEN_NATIVE
;
32 EXPORT_SYMBOL_GPL(xen_domain_type
);
34 struct shared_info xen_dummy_shared_info
;
35 struct shared_info
*HYPERVISOR_shared_info
= (void *)&xen_dummy_shared_info
;
37 DEFINE_PER_CPU(struct vcpu_info
*, xen_vcpu
);
38 static struct vcpu_info __percpu
*xen_vcpu_info
;
40 /* These are unused until we support booting "pre-ballooned" */
41 unsigned long xen_released_pages
;
42 struct xen_memory_region xen_extra_mem
[XEN_EXTRA_MEM_MAX_REGIONS
] __initdata
;
44 /* TODO: to be removed */
45 __read_mostly
int xen_have_vector_callback
;
46 EXPORT_SYMBOL_GPL(xen_have_vector_callback
);
48 int xen_platform_pci_unplug
= XEN_UNPLUG_ALL
;
49 EXPORT_SYMBOL_GPL(xen_platform_pci_unplug
);
51 static __read_mostly
int xen_events_irq
= -1;
53 /* map fgmfn of domid to lpfn in the current domain */
54 static int map_foreign_page(unsigned long lpfn
, unsigned long fgmfn
,
58 struct xen_add_to_physmap_range xatp
= {
60 .foreign_domid
= domid
,
62 .space
= XENMAPSPACE_gmfn_foreign
,
64 xen_ulong_t idx
= fgmfn
;
65 xen_pfn_t gpfn
= lpfn
;
68 set_xen_guest_handle(xatp
.idxs
, &idx
);
69 set_xen_guest_handle(xatp
.gpfns
, &gpfn
);
70 set_xen_guest_handle(xatp
.errs
, &err
);
72 rc
= HYPERVISOR_memory_op(XENMEM_add_to_physmap_range
, &xatp
);
74 pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
75 rc
, err
, lpfn
, fgmfn
);
82 xen_pfn_t fgmfn
; /* foreign domain's gmfn */
85 struct vm_area_struct
*vma
;
88 struct xen_remap_mfn_info
*info
;
91 static int remap_pte_fn(pte_t
*ptep
, pgtable_t token
, unsigned long addr
,
94 struct remap_data
*info
= data
;
95 struct page
*page
= info
->pages
[info
->index
++];
96 unsigned long pfn
= page_to_pfn(page
);
97 pte_t pte
= pfn_pte(pfn
, info
->prot
);
99 if (map_foreign_page(pfn
, info
->fgmfn
, info
->domid
))
101 set_pte_at(info
->vma
->vm_mm
, addr
, ptep
, pte
);
106 int xen_remap_domain_mfn_range(struct vm_area_struct
*vma
,
108 xen_pfn_t mfn
, int nr
,
109 pgprot_t prot
, unsigned domid
,
113 struct remap_data data
;
115 /* TBD: Batching, current sole caller only does page at a time */
125 err
= apply_to_page_range(vma
->vm_mm
, addr
, nr
<< PAGE_SHIFT
,
126 remap_pte_fn
, &data
);
129 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range
);
131 int xen_unmap_domain_mfn_range(struct vm_area_struct
*vma
,
132 int nr
, struct page
**pages
)
136 for (i
= 0; i
< nr
; i
++) {
137 struct xen_remove_from_physmap xrp
;
138 unsigned long rc
, pfn
;
140 pfn
= page_to_pfn(pages
[i
]);
142 xrp
.domid
= DOMID_SELF
;
144 rc
= HYPERVISOR_memory_op(XENMEM_remove_from_physmap
, &xrp
);
146 pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
153 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range
);
155 static void __init
xen_percpu_init(void *unused
)
157 struct vcpu_register_vcpu_info info
;
158 struct vcpu_info
*vcpup
;
162 pr_info("Xen: initializing cpu%d\n", cpu
);
163 vcpup
= per_cpu_ptr(xen_vcpu_info
, cpu
);
165 info
.mfn
= __pa(vcpup
) >> PAGE_SHIFT
;
166 info
.offset
= offset_in_page(vcpup
);
168 err
= HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info
, cpu
, &info
);
170 per_cpu(xen_vcpu
, cpu
) = vcpup
;
172 enable_percpu_irq(xen_events_irq
, 0);
175 static void xen_restart(char str
, const char *cmd
)
177 struct sched_shutdown r
= { .reason
= SHUTDOWN_reboot
};
179 rc
= HYPERVISOR_sched_op(SCHEDOP_shutdown
, &r
);
184 static void xen_power_off(void)
186 struct sched_shutdown r
= { .reason
= SHUTDOWN_poweroff
};
188 rc
= HYPERVISOR_sched_op(SCHEDOP_shutdown
, &r
);
194 * see Documentation/devicetree/bindings/arm/xen.txt for the
195 * documentation of the Xen Device Tree format.
197 #define GRANT_TABLE_PHYSADDR 0
198 static int __init
xen_guest_init(void)
200 struct xen_add_to_physmap xatp
;
201 static struct shared_info
*shared_info_page
= 0;
202 struct device_node
*node
;
204 const char *s
= NULL
;
205 const char *version
= NULL
;
206 const char *xen_prefix
= "xen,xen-";
209 node
= of_find_compatible_node(NULL
, NULL
, "xen,xen");
211 pr_debug("No Xen support\n");
214 s
= of_get_property(node
, "compatible", &len
);
215 if (strlen(xen_prefix
) + 3 < len
&&
216 !strncmp(xen_prefix
, s
, strlen(xen_prefix
)))
217 version
= s
+ strlen(xen_prefix
);
218 if (version
== NULL
) {
219 pr_debug("Xen version not found\n");
222 if (of_address_to_resource(node
, GRANT_TABLE_PHYSADDR
, &res
))
224 xen_hvm_resume_frames
= res
.start
>> PAGE_SHIFT
;
225 xen_events_irq
= irq_of_parse_and_map(node
, 0);
226 pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
227 version
, xen_events_irq
, xen_hvm_resume_frames
);
228 xen_domain_type
= XEN_HVM_DOMAIN
;
230 xen_setup_features();
231 if (xen_feature(XENFEAT_dom0
))
232 xen_start_info
->flags
|= SIF_INITDOMAIN
|SIF_PRIVILEGED
;
234 xen_start_info
->flags
&= ~(SIF_INITDOMAIN
|SIF_PRIVILEGED
);
236 if (!shared_info_page
)
237 shared_info_page
= (struct shared_info
*)
238 get_zeroed_page(GFP_KERNEL
);
239 if (!shared_info_page
) {
240 pr_err("not enough memory\n");
243 xatp
.domid
= DOMID_SELF
;
245 xatp
.space
= XENMAPSPACE_shared_info
;
246 xatp
.gpfn
= __pa(shared_info_page
) >> PAGE_SHIFT
;
247 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap
, &xatp
))
250 HYPERVISOR_shared_info
= (struct shared_info
*)shared_info_page
;
252 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
253 * page, we use it in the event channel upcall and in some pvclock
255 * The shared info contains exactly 1 CPU (the boot CPU). The guest
256 * is required to use VCPUOP_register_vcpu_info to place vcpu info
257 * for secondary CPUs as they are brought up.
258 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
260 xen_vcpu_info
= __alloc_percpu(sizeof(struct vcpu_info
),
261 sizeof(struct vcpu_info
));
262 if (xen_vcpu_info
== NULL
)
266 if (!xen_initial_domain())
271 core_initcall(xen_guest_init
);
273 static int __init
xen_pm_init(void)
275 pm_power_off
= xen_power_off
;
276 arm_pm_restart
= xen_restart
;
280 subsys_initcall(xen_pm_init
);
282 static irqreturn_t
xen_arm_callback(int irq
, void *arg
)
284 xen_hvm_evtchn_do_upcall();
288 static int __init
xen_init_events(void)
290 if (!xen_domain() || xen_events_irq
< 0)
295 if (request_percpu_irq(xen_events_irq
, xen_arm_callback
,
296 "events", &xen_vcpu
)) {
297 pr_err("Error requesting IRQ %d\n", xen_events_irq
);
301 on_each_cpu(xen_percpu_init
, NULL
, 0);
305 postcore_initcall(xen_init_events
);
/* In the hypervisor.S file. */
/* Export the assembly hypercall trampolines so modules can use them. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(privcmd_call);