/* $NetBSD: hypervisor_machdep.c,v 1.12 2009/07/29 12:02:08 cegger Exp $ */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/******************************************************************************
 * Communication to/from hypervisor.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.12 2009/07/29 12:02:08 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pmap.h>

#include <xen/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/xenpmap.h>

/*
 * arch-dependent p2m frame lists list (L3 and L2)
 * used by Xen for save/restore mappings
 */
static unsigned long * l3_p2m_page;
static unsigned long * l2_p2m_page;
static int l2_p2m_page_size; /* size of L2 page, in pages */

static void build_p2m_frame_list_list(void);
static void update_p2m_frame_list_list(void);

// #define PORT_DEBUG 4
// #define EARLY_DEBUG_EVENT
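
/*
 * In outline: event-channel pending state is a two-level hierarchy.
 * Each bit of vcpu_info.evtchn_pending_sel selects one word of
 * shared_info.evtchn_pending[]; a bit set there, and clear in
 * shared_info.evtchn_mask[], identifies an event channel that fired.
 * The scan loops below walk selector bits first, then word bits.
 */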

int stipending(void);
int
stipending(void)
{
	unsigned long l1;
	unsigned long l2;
	unsigned int l1i, l2i, port;
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int ret;

	ret = 0;
	ci = curcpu();
	vci = ci->ci_vcpu;

#if 0
	if (HYPERVISOR_shared_info->events)
		printf("stipending events %08lx mask %08lx ilevel %d\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel);
#endif

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	/*
	 * we're only called after STIC, so we know that we'll have to
	 * STI at the end
	 */
	while (vci->evtchn_upcall_pending) {
		cli();
		vci->evtchn_upcall_pending = 0;
		/* NB. No need for a barrier here -- XCHG is a barrier
		 * on x86. */
		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
		while ((l1i = xen_ffs(l1)) != 0) {
			l1i--;
			l1 &= ~(1UL << l1i);

			l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
			/*
			 * mask and clear event. More efficient than calling
			 * hypervisor_mask/clear_event for each event.
			 */
			xen_atomic_setbits_l(&s->evtchn_mask[l1i], l2);
			xen_atomic_clearbits_l(&s->evtchn_pending[l1i], l2);
			while ((l2i = xen_ffs(l2)) != 0) {
				l2i--;
				l2 &= ~(1UL << l2i);

				port = (l1i << LONG_SHIFT) + l2i;
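				/*
				 * Illustrative decode: LONG_SHIFT is
				 * log2(bits per long), i.e. 5 on i386 and
				 * 6 on amd64, so on amd64 selector bit
				 * l1i == 1 with word bit l2i == 3 names
				 * event channel (1 << 6) + 3 == 67.
				 */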
				if (evtsource[port]) {
					hypervisor_set_ipending(
					    evtsource[port]->ev_imask,
					    l1i, l2i);
					evtsource[port]->ev_evcnt.ev_count++;
					if (ret == 0 && ci->ci_ilevel <
					    evtsource[port]->ev_maxlevel)
						ret = 1;
				}
#ifdef DOM0OPS
				else {
					/* set pending event */
					xenevt_setipending(l1i, l2i);
				}
#endif
			}
		}
		sti();
	}

#if 0
	if (ci->ci_ipending & 0x1)
		printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel,
		    ci->ci_ipending);
#endif

	return (ret);
}

void
do_hypervisor_callback(struct intrframe *regs)
{
	unsigned long l1;
	unsigned long l2;
	unsigned int l1i, l2i, port;
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int level;

	ci = curcpu();
	vci = ci->ci_vcpu;
	level = ci->ci_ilevel;

	// DDD printf("do_hypervisor_callback\n");

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	while (vci->evtchn_upcall_pending) {
		vci->evtchn_upcall_pending = 0;
		/* NB. No need for a barrier here -- XCHG is a barrier
		 * on x86. */
		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
		while ((l1i = xen_ffs(l1)) != 0) {
			l1i--;
			l1 &= ~(1UL << l1i);

			l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
			/*
			 * mask and clear the pending events.
			 * Doing it here for all events that will be processed
			 * avoids a race with stipending (which can be called
			 * through evtchn_do_event->splx) that could cause an
			 * event to be both processed and marked pending.
			 */
			xen_atomic_setbits_l(&s->evtchn_mask[l1i], l2);
			xen_atomic_clearbits_l(&s->evtchn_pending[l1i], l2);

			while ((l2i = xen_ffs(l2)) != 0) {
				l2i--;
				l2 &= ~(1UL << l2i);

				port = (l1i << LONG_SHIFT) + l2i;
#ifdef PORT_DEBUG
				if (port == PORT_DEBUG)
					printf("do_hypervisor_callback event %d\n",
					    port);
#endif
				if (evtsource[port])
					call_evtchn_do_event(port, regs);
#ifdef DOM0OPS
				else {
					if (ci->ci_ilevel < IPL_HIGH) {
						/* fast path */
						int oipl = ci->ci_ilevel;
						ci->ci_ilevel = IPL_HIGH;
						call_xenevt_event(port);
						ci->ci_ilevel = oipl;
					} else {
						/* set pending event */
						xenevt_setipending(l1i, l2i);
					}
				}
#endif
			}
		}
	}

#ifdef DIAGNOSTIC
	if (level != ci->ci_ilevel)
		printf("hypervisor done %08x level %d/%d ipending %08x\n",
		    (uint)vci->evtchn_pending_sel,
		    level, ci->ci_ilevel, ci->ci_ipending);
#endif
}

void
hypervisor_unmask_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	volatile struct vcpu_info *vci = curcpu()->ci_vcpu;

#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_unmask_event %d\n", ev);
#endif

	xen_atomic_clear_bit(&s->evtchn_mask[0], ev);
	/*
	 * The following is basically the equivalent of
	 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose the
	 * interrupt edge' if the channel is masked.
	 */
	if (xen_atomic_test_bit(&s->evtchn_pending[0], ev) &&
	    !xen_atomic_test_and_set_bit(&vci->evtchn_pending_sel,
		ev >> LONG_SHIFT)) {
		xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
		if (!vci->evtchn_upcall_mask)
			hypervisor_force_callback();
	}
}
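
/*
 * Illustrative sequence, assuming a 64-bit port: if event 35 fires
 * while masked, Xen sets evtchn_pending[0] bit 35 but raises no
 * upcall.  On unmask, the test above notices the latent bit, sets
 * selector bit 35 >> LONG_SHIFT by hand and forces a callback, so
 * do_hypervisor_callback() replays the lost "edge".
 */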

void
hypervisor_mask_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;

#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_mask_event %d\n", ev);
#endif

	xen_atomic_set_bit(&s->evtchn_mask[0], ev);
}

void
hypervisor_clear_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;

#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_clear_event %d\n", ev);
#endif

	xen_atomic_clear_bit(&s->evtchn_pending[0], ev);
}

void
hypervisor_enable_ipl(unsigned int ipl)
{
	u_long l1, l2;
	int l1i, l2i;
	struct cpu_info *ci = curcpu();

	/*
	 * enable all events for ipl. As we only set an event in ipl_evt_mask
	 * for its lowest IPL, and pending IPLs are processed high to low,
	 * we know that all callbacks for this event have been processed.
	 */

	l1 = ci->ci_isources[ipl]->ipl_evt_mask1;
	ci->ci_isources[ipl]->ipl_evt_mask1 = 0;
	while ((l1i = xen_ffs(l1)) != 0) {
		int evtch;

		l1i--;
		l1 &= ~(1UL << l1i);

		l2 = ci->ci_isources[ipl]->ipl_evt_mask2[l1i];
		ci->ci_isources[ipl]->ipl_evt_mask2[l1i] = 0;
		while ((l2i = xen_ffs(l2)) != 0) {
			l2i--;
			l2 &= ~(1UL << l2i);

			evtch = (l1i << LONG_SHIFT) + l2i;
			hypervisor_enable_event(evtch);
		}
	}
}

void
hypervisor_set_ipending(uint32_t iplmask, int l1, int l2)
{
	int ipl;
	struct cpu_info *ci = curcpu();

	/* set pending bit for the appropriate IPLs */
	ci->ci_ipending |= iplmask;

	/*
	 * And set event pending bit for the lowest IPL. As IPLs are handled
	 * from high to low, this ensures that all callbacks will have been
	 * called when we ack the event.
	 */
	ipl = ffs(iplmask);
	KASSERT(ipl > 0);
	ipl--;
	KASSERT(ipl < NIPL);
	KASSERT(ci->ci_isources[ipl] != NULL);
	ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
	ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
}
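
/*
 * Illustrative example (hypothetical values): if iplmask has the bits
 * for both IPL_BIO and IPL_NET set, ffs() picks the lower one,
 * IPL_BIO.  Since pending IPLs are processed from high to low,
 * IPL_BIO's handler runs last, so the event is recorded (and later
 * re-enabled by hypervisor_enable_ipl()) only at that level.
 */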

void
hypervisor_machdep_attach(void)
{
	/* dom0 does not require the arch-dependent P2M translation table */
	if (!xendomain_is_dom0()) {
		build_p2m_frame_list_list();
	}
}

/*
 * Generate the p2m_frame_list_list table,
 * needed for guest save/restore
 */
static void
build_p2m_frame_list_list(void)
{
	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;

	/*
	 * The p2m list is composed of three levels of indirection,
	 * each layer containing MFNs pointing to lower-level pages.
	 * The indirection is used to convert a given PFN to its MFN.
	 * Each N-level page can point to @fpp (N-1)-level pages.
	 * For example, for x86 32bit, we have:
	 * - PAGE_SIZE: 4096 bytes
	 * - fpp: 1024 (one L3 page can address 1024 L2 pages)
	 * An L1 page contains the list of MFNs we are looking for.
	 */
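	/*
	 * Worked example (illustrative, x86 32bit non-PAE, fpp == 1024):
	 * a guest with 512MB of RAM has max_pfn == 131072 and needs
	 * howmany(131072, 1024) == 128 L1 frames; the 128 L2 entries
	 * pointing at them fit comfortably in a single L3 page.
	 */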
	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(paddr_t);

	/* we only need one L3 page */
	l3_p2m_page = kmem_alloc(PAGE_SIZE, KM_NOSLEEP);
	if (l3_p2m_page == NULL)
		panic("could not allocate memory for l3_p2m_page");

	/*
	 * Determine how many L2 pages we need for the mapping.
	 * Each L2 can map a total of @fpp L1 pages.
	 */
	l2_p2m_page_size = howmany(max_pfn, fpp);

	l2_p2m_page = kmem_alloc(l2_p2m_page_size * PAGE_SIZE, KM_NOSLEEP);
	if (l2_p2m_page == NULL)
		panic("could not allocate memory for l2_p2m_page");

	/* We now have L3 and L2 pages ready, update L1 mapping */
	update_p2m_frame_list_list();
}

/*
 * Update the L1 p2m_frame_list_list mapping (during guest boot or resume)
 */
static void
update_p2m_frame_list_list(void)
{
	int i;
	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;

	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(paddr_t);

	for (i = 0; i < l2_p2m_page_size; i++) {
		/*
		 * Each time we start a new L2 page,
		 * store its MFN in the L3 page
		 */
		if ((i % fpp) == 0) {
			l3_p2m_page[i/fpp] = vtomfn(
			    (vaddr_t)&l2_p2m_page[i]);
		}
		/*
		 * We use a shortcut here: since the
		 * @xpmap_phys_to_machine_mapping array already contains
		 * the PFN to MFN mapping, we just set each l2_p2m_page
		 * MFN pointer to the MFN of the corresponding frame of
		 * @xpmap_phys_to_machine_mapping.
		 */
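		/*
		 * Illustrative instance (hypothetical 512MB guest,
		 * fpp == 1024): entry i == 0 gets the MFN of the frame
		 * holding PFN->MFN entries 0..1023, entry i == 1 the
		 * frame for PFNs 1024..2047, and so on.
		 */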
		l2_p2m_page[i] = vtomfn((vaddr_t)
		    &xpmap_phys_to_machine_mapping[i*fpp]);
	}

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
	    vtomfn((vaddr_t)l3_p2m_page);
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
}