/*
 * Copyright (C) 2004-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* common code for the kernel and monitor. */
#ifdef IN_MONITOR
#define KER_ONLY(x...)
#define MON_MP_PTR(s, x) (x)
#define KER_MP_PTR(s, x) (x)
#define MON_RP_PTR(s, x) (x)
#define KER_RP_PTR(s, x) (x)
#else
#define KER_ONLY(x...) x
#define MON_MP_PTR(s, x) ((struct mapped_page *)((uint8_t *)(x) + (s)->monitor_to_kernel_offset))
#define KER_MP_PTR(s, x) ((struct mapped_page *)((uint8_t *)(x) - (s)->monitor_to_kernel_offset))
#define MON_RP_PTR(s, x) ((struct kqemu_ram_page *)((uint8_t *)(x) + (s)->monitor_to_kernel_offset))
#define KER_RP_PTR(s, x) ((struct kqemu_ram_page *)((uint8_t *)(x) - (s)->monitor_to_kernel_offset))
#endif
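
/* The state structure is mapped both in the host kernel and in the monitor
   address space, so pointers stored in its lists and hash tables are kept in
   a single form: MON_MP_PTR()/MON_RP_PTR() turn a stored pointer into one
   usable by the running code and KER_MP_PTR()/KER_RP_PTR() do the opposite
   conversion before storing, using monitor_to_kernel_offset.  In one of the
   two builds no translation is needed; the guard symbol used above
   (IN_MONITOR) is an assumption about the build configuration. */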
/* update the segment cache in the CPU state from the live segment
   descriptors (we use the LDT and GDT descriptor tables) */
static inline void reload_seg_cache(struct kqemu_state *s, int seg_reg,
                                    int selector)
{
    struct kqemu_segment_cache *sc;
    uint32_t e1, e2;
    uint8_t *ptr;
    int sel, sel1;

    if (s->cpu_state.cpl != 3) {
        sel1 = selector | 3; /* ASSUMPTION: regs1 holds the selectors with RPL forced to 3 */
        /* XXX: set DPL correctly */
        if (sel1 == s->regs1.cs_sel || sel1 == s->regs1.ss_sel) {
            sel = (selector & ~7) | ((selector & 4) << 14);
            ptr = (uint8_t *)s->dt_table + sel;
            e1 = *(uint32_t *)(ptr);
            e2 = *(uint32_t *)(ptr + 4);
        } else {
            e1 = s->seg_desc_cache[seg_reg][0];
            e2 = s->seg_desc_cache[seg_reg][1];
        }
    } else {
        /* the CPL=3 DT table is not modified */
        sel = (selector & ~7) | ((selector & 4) << 14);
        ptr = (uint8_t *)s->dt_table + ((NB_DT_TABLES - 1) << 17) + sel;
        e1 = *(uint32_t *)(ptr);
        e2 = *(uint32_t *)(ptr + 4);
    }
    sc = &s->cpu_state.segs[seg_reg];
    if (seg_reg == R_CS || seg_reg == R_SS)
        selector = (selector & ~3) | s->cpu_state.cpl;
    sc->selector = selector;
#ifdef __x86_64__
    if (seg_reg >= R_FS) {
        /* do nothing: the base is always loaded before with the
           FSBASE and GSBASE MSRs */
    } else
#endif
    {
        sc->base = get_seg_base(e1, e2);
        sc->limit = get_seg_limit(e1, e2);
    }
}
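
/* restore_cpu_state_from_regs(): copy the register frame saved on exception
   entry back into the kqemu CPU state.  The eflags value is rebuilt by taking
   the bits covered by EFLAGS_MASK from the virtualized eflags in the
   communication page and the remaining bits from the hardware frame; the
   segment caches are then refreshed with reload_seg_cache(). */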
void restore_cpu_state_from_regs(struct kqemu_state *s,
                                 struct kqemu_exception_regs *r)
{
    struct kqemu_cpu_state *env = &s->cpu_state;
#ifdef __x86_64__
    env->regs[R_EAX] = r->eax;
    env->regs[R_ECX] = r->ecx;
    env->regs[R_EDX] = r->edx;
    env->regs[R_EBX] = r->ebx;
    env->regs[R_ESP] = r->esp;
    env->regs[R_EBP] = r->ebp;
    env->regs[R_ESI] = r->esi;
    env->regs[R_EDI] = r->edi;
    env->regs[8] = r->r8;
    env->regs[9] = r->r9;
    env->regs[10] = r->r10;
    env->regs[11] = r->r11;
    env->regs[12] = r->r12;
    env->regs[13] = r->r13;
    env->regs[14] = r->r14;
    env->regs[15] = r->r15;

    env->eflags = (s->comm_page.virt_eflags & EFLAGS_MASK) |
        (r->eflags & ~EFLAGS_MASK);

    reload_seg_cache(s, R_CS, r->cs_sel);
    reload_seg_cache(s, R_SS, r->ss_sel);
    reload_seg_cache(s, R_DS, env->segs[R_DS].selector);
    reload_seg_cache(s, R_ES, env->segs[R_ES].selector);
#else
    env->regs[R_EAX] = r->eax;
    env->regs[R_ECX] = r->ecx;
    env->regs[R_EDX] = r->edx;
    env->regs[R_EBX] = r->ebx;
    env->regs[R_ESP] = r->esp;
    env->regs[R_EBP] = r->ebp;
    env->regs[R_ESI] = r->esi;
    env->regs[R_EDI] = r->edi;

    env->eflags = (s->comm_page.virt_eflags & EFLAGS_MASK) |
        (r->eflags & ~EFLAGS_MASK);

    reload_seg_cache(s, R_CS, r->cs_sel);
    reload_seg_cache(s, R_SS, r->ss_sel);
    reload_seg_cache(s, R_DS, r->ds_sel);
    reload_seg_cache(s, R_ES, r->es_sel);
#endif
    reload_seg_cache(s, R_FS, env->segs[R_FS].selector);
    reload_seg_cache(s, R_GS, env->segs[R_GS].selector);
}
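
/* Virtual address allocator for the monitor mapping area: mapped_pages[] has
   one slot per page of the monitor address range and unused slots are chained
   through their 'next' field starting at first_mapped_page.  get_vaddr() pops
   a free page address, free_vaddr() pushes one back. */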
/* give back a virtual address previously returned by get_vaddr() */
static void free_vaddr(struct kqemu_state *s, unsigned long vaddr)
{
    unsigned long page_index;

    page_index = (vaddr - s->monitor_vaddr) >> PAGE_SHIFT;
    if (page_index >= MAX_MAPPED_PAGES)
        return; /* assumed: out-of-range addresses are ignored */
    s->mapped_pages[page_index].next = s->first_mapped_page;
    s->first_mapped_page = page_index;
}
/* return a new virtual address suitable to map a page in it, or -1 if no
   virtual address is available */
static unsigned long get_vaddr(struct kqemu_state *s)
{
    unsigned long page_index;

    page_index = s->first_mapped_page;
    if (page_index == -1)
        return -1;
    s->first_mapped_page = s->mapped_pages[page_index].next;
    return s->monitor_vaddr + ((unsigned long)page_index << PAGE_SHIFT);
}
static inline unsigned int page_index_hash_func(unsigned long page_index)
{
    return (page_index ^ (page_index >> MAPPED_PAGES_HASH_BITS)) &
        (MAPPED_PAGES_HASH_SIZE - 1);
}
static inline void *page_index_to_virt(struct kqemu_state *s,
                                       unsigned long page_index)
{
    struct mapped_page *p;

    p = s->mapped_pages_hash[page_index_hash_func(page_index)];
    while (p != NULL) {
        p = MON_MP_PTR(s, p);
        if (p->page_index == page_index)
            return (void *)(((p - s->mapped_pages) << PAGE_SHIFT) +
                            s->monitor_vaddr);
        p = p->hash_next; /* assumed name of the hash chaining field */
    }
    return NULL;
}
static inline void *page_index_to_kaddr(struct kqemu_state *s,
                                        unsigned long page_index)
{
    struct mapped_page *p;

    p = s->mapped_pages_hash[page_index_hash_func(page_index)];
    while (p != NULL) {
        p = MON_MP_PTR(s, p);
        if (p->page_index == page_index)
            return kqemu_page_kaddr(p->host_page);
        p = p->hash_next; /* assumed name of the hash chaining field */
    }
    return NULL;
}
static inline void set_vaddr_page_index(struct kqemu_state *s,
                                        unsigned long vaddr,
                                        unsigned long page_index,
                                        struct kqemu_page *host_page,
                                        int is_user)
{
    struct mapped_page *p, **ph;

    p = &s->mapped_pages[(vaddr - s->monitor_vaddr) >> PAGE_SHIFT];
    p->page_index = page_index;
    p->host_page = host_page;
    p->user_page = is_user;
    ph = &s->mapped_pages_hash[page_index_hash_func(page_index)];
    p->hash_next = *ph; /* assumed name of the hash chaining field */
    *ph = KER_MP_PTR(s, p);
}
#ifdef IN_MONITOR
#define page_index_to_addr(s, x) page_index_to_virt(s, x)
#else
#define page_index_to_addr(s, x) page_index_to_kaddr(s, x)
#endif
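
/* page_index_to_addr() resolves a page index to an address usable by the
   running code: the monitor build uses the page's mapping in the monitor
   address space, the kernel build uses the host kernel address of the
   underlying page (kqemu_page_kaddr()). */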
/* alloc = 0 : do not allocate PTEs
         1 : allocate up to the PTE page
         2 : allocate up to the PDE page */

#ifdef __x86_64__
/* PAE x86_64 case */
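/* mon_get_ptep_l3()/mon_get_ptep_l2() walk the monitor page tables of address
   space 'as_index' for 'vaddr' and return a pointer to the PTE (or to the PDE
   when alloc == 2).  Missing intermediate tables are allocated with
   mon_alloc_page() when 'alloc' permits it; with alloc == 0 a non-present
   entry presumably makes the lookup fail (NULL).  In the kernel build,
   *pvptep additionally receives the monitor-space address of the PTE. */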
static inline uint64_t *mon_get_ptep_l3(struct kqemu_state *s,
                                        int as_index, unsigned long vaddr,
                                        int alloc KER_ONLY(, unsigned long *pvptep))
{
    int pml4e_index, pdpe_index, pde_index, pte_index;
    unsigned long pdp_page_index, pde_page_index, pte_page_index;
    uint64_t pml4e, pdpe, pde;
    uint64_t *pgd_page, *pdp_page, *pde_page, *pte_page;
    void *ptr;

    pgd_page = s->pgds[as_index].l4;
    pml4e_index = (vaddr >> 39) & 0x1ff;
    pml4e = pgd_page[pml4e_index];
    if (!(pml4e & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        /* allocate a new page */
        ptr = mon_alloc_page(s, &pdp_page_index);
        if (!ptr)
            return NULL;
        pgd_page[pml4e_index] = ((uint64_t)pdp_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pdp_page_index = pml4e >> PAGE_SHIFT;
    }
    pdp_page = page_index_to_addr(s, pdp_page_index);

    pdpe_index = (vaddr >> 30) & 0x1ff;
    pdpe = pdp_page[pdpe_index];
    if (!(pdpe & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        ptr = mon_alloc_page(s, &pde_page_index);
        if (!ptr)
            return NULL;
        pdp_page[pdpe_index] = ((uint64_t)pde_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pde_page_index = pdpe >> PAGE_SHIFT;
    }
    pde_page = page_index_to_addr(s, pde_page_index);

    pde_index = (vaddr >> 21) & 0x1ff;
    if (alloc == 2)
        return pde_page + pde_index;
    pde = pde_page[pde_index];
    if (!(pde & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        ptr = mon_alloc_page(s, &pte_page_index);
        if (!ptr)
            return NULL;
        pde_page[pde_index] = ((uint64_t)pte_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pte_page_index = pde >> PAGE_SHIFT;
    }
    pte_page = page_index_to_addr(s, pte_page_index);

    pte_index = (vaddr >> 12) & 0x1ff;
    /* in the kernel build, also report the monitor-space address of the PTE */
    KER_ONLY(*pvptep = (unsigned long)((uint64_t *)page_index_to_virt(s, pte_page_index) + pte_index);)
    return pte_page + pte_index;
}
/* just to avoid putting ifdefs */
static inline uint32_t *mon_get_ptep_l2(struct kqemu_state *s,
                                        int as_index, unsigned long vaddr,
                                        int alloc KER_ONLY(, unsigned long *pvptep))
{
    return NULL;
}

#else
static inline uint64_t *mon_get_ptep_l3(struct kqemu_state *s,
                                        int as_index, unsigned long vaddr,
                                        int alloc KER_ONLY(, unsigned long *pvptep))
{
    int pdpe_index, pde_index, pte_index;
    unsigned long pde_page_index, pte_page_index;
    uint64_t pdpe, pde;
    uint64_t *pgd_page, *pde_page, *pte_page;
    void *ptr;

    pgd_page = s->pgds[as_index].l3;
    pdpe_index = vaddr >> 30;
    pdpe = pgd_page[pdpe_index];
    if (!(pdpe & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        /* allocate a new page */
        ptr = mon_alloc_page(s, &pde_page_index);
        if (!ptr)
            return NULL;
        /* no other bit may be set in a PAE PDPTE, otherwise a GPF is raised */
        pgd_page[pdpe_index] = ((uint64_t)pde_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK;
    } else {
        pde_page_index = pdpe >> PAGE_SHIFT;
    }
    pde_page = page_index_to_addr(s, pde_page_index);

    pde_index = (vaddr >> 21) & 0x1ff;
    if (alloc == 2)
        return pde_page + pde_index;
    pde = pde_page[pde_index];
    if (!(pde & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        ptr = mon_alloc_page(s, &pte_page_index);
        if (!ptr)
            return NULL;
        pde_page[pde_index] = ((uint64_t)pte_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pte_page_index = pde >> PAGE_SHIFT;
    }
    pte_page = page_index_to_addr(s, pte_page_index);

    pte_index = (vaddr >> 12) & 0x1ff;
    /* in the kernel build, also report the monitor-space address of the PTE */
    KER_ONLY(*pvptep = (unsigned long)((uint64_t *)page_index_to_virt(s, pte_page_index) + pte_index);)
    return pte_page + pte_index;
}
static inline uint32_t *mon_get_ptep_l2(struct kqemu_state *s,
                                        int as_index, unsigned long vaddr,
                                        int alloc KER_ONLY(, unsigned long *pvptep))
{
    int pde_index, pte_index;
    unsigned long pte_page_index;
    uint32_t pde;
    uint32_t *pgd_page, *pte_page;
    void *ptr;

    pgd_page = s->pgds[as_index].l2;
    pde_index = vaddr >> PGD_SHIFT;
    if (alloc == 2)
        return pgd_page + pde_index;
    pde = pgd_page[pde_index];
    if (!(pde & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        /* allocate a new page */
        ptr = mon_alloc_page(s, &pte_page_index);
        if (!ptr)
            return NULL;
        pgd_page[pde_index] = (pte_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pte_page_index = pde >> PAGE_SHIFT;
    }
    pte_page = page_index_to_addr(s, pte_page_index);
    pte_index = (vaddr >> PAGE_SHIFT) & PTE_MASK;
    /* in the kernel build, also report the monitor-space address of the PTE */
    KER_ONLY(*pvptep = (unsigned long)((uint32_t *)page_index_to_virt(s, pte_page_index) + pte_index);)
    return pte_page + pte_index;
}
#endif
static unsigned long mon_get_pte(struct kqemu_state *s,
                                 int as_index, unsigned long vaddr)
{
    if (s->use_pae) { /* ASSUMPTION: runtime selector between the PAE/x86_64 and legacy formats */
        uint64_t *ptep, pte;
        ptep = mon_get_ptep_l3(s, as_index, vaddr, 0 KER_ONLY(, NULL));
        if (!ptep)
            return -1;
        pte = *ptep;
        if (!(pte & PG_PRESENT_MASK))
            return -1;
        return pte >> PAGE_SHIFT;
    } else {
        uint32_t *ptep, pte;
        ptep = mon_get_ptep_l2(s, as_index, vaddr, 0 KER_ONLY(, NULL));
        if (!ptep)
            return -1;
        pte = *ptep;
        if (!(pte & PG_PRESENT_MASK))
            return -1;
        return pte >> PAGE_SHIFT;
    }
}
/* RAM page handling */
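
/* Each guest RAM page is described by a struct kqemu_ram_page, hashed by its
   physical address (ram_page_hash).  A RAM page can be mapped at several
   (address space, virtual address) locations at once; the first mapping is
   kept in rp->vaddr and the following ones are chained through the
   per-address-space "next mapping" tables below. */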
static inline unsigned int ram_page_hash_func(unsigned long page_index)
{
    return (page_index ^ (page_index >> RAM_PAGE_HASH_BITS)) &
        (RAM_PAGE_HASH_SIZE - 1);
}
static inline struct kqemu_ram_page *
find_ram_page_from_paddr(struct kqemu_state *s,
                         unsigned long paddr)
{
    struct kqemu_ram_page *rp;

    rp = s->ram_page_hash[ram_page_hash_func(paddr)];
    while (rp != NULL) {
        rp = MON_RP_PTR(s, rp);
        if (rp->paddr == paddr)
            return rp;
        rp = rp->hash_next; /* assumed name of the hash chaining field */
    }
    return NULL;
}
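
/* The "next mapping" tables are sparse, page-table-like arrays indexed by
   (as_index, vaddr).  The slot for a given mapping holds the encoded vaddr of
   the next mapping of the same RAM page.  With alloc == 0,
   get_ram_page_next_mapping_alloc() presumably returns NULL instead of
   allocating missing levels. */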
#ifdef __x86_64__
static unsigned long *get_ram_page_next_mapping_alloc(struct kqemu_state *s,
                                                      int as_index,
                                                      unsigned long vaddr,
                                                      int alloc)
{
    int pml4e_index, pdpe_index, pde_index, pte_index;
    unsigned long ***pml4e, **pdpe, *pde;

    pml4e_index = (vaddr >> 39) & 0x1ff;
    pml4e = s->ram_page_mappings[as_index][pml4e_index];
    if (!pml4e) {
        if (!alloc)
            return NULL;
        pml4e = mon_alloc_page(s, NULL);
        if (!pml4e)
            return NULL;
        s->ram_page_mappings[as_index][pml4e_index] = pml4e;
    }
    pdpe_index = (vaddr >> 30) & 0x1ff;
    pdpe = pml4e[pdpe_index];
    if (!pdpe) {
        if (!alloc)
            return NULL;
        pdpe = mon_alloc_page(s, NULL);
        if (!pdpe)
            return NULL;
        pml4e[pdpe_index] = pdpe;
    }
    pde_index = (vaddr >> 21) & 0x1ff;
    pde = pdpe[pde_index];
    if (!pde) {
        if (!alloc)
            return NULL;
        pde = mon_alloc_page(s, NULL);
        if (!pde)
            return NULL;
        pdpe[pde_index] = pde;
    }
    pte_index = (vaddr >> 12) & 0x1ff;
    return pde + pte_index;
}
#else
static unsigned long *get_ram_page_next_mapping_alloc(struct kqemu_state *s,
                                                      int as_index,
                                                      unsigned long vaddr,
                                                      int alloc)
{
    int pgd_index;
    unsigned long *ptep;

    pgd_index = vaddr >> PGD_SHIFT;
    ptep = s->ram_page_mappings[as_index][pgd_index];
    if (!ptep) {
        if (!alloc)
            return NULL;
        ptep = mon_alloc_page(s, NULL);
        if (!ptep)
            return NULL;
        s->ram_page_mappings[as_index][pgd_index] = ptep;
    }
    ptep += (vaddr >> PAGE_SHIFT) & PTE_MASK;
    return ptep;
}
#endif
static inline unsigned long *get_ram_page_next_mapping(struct kqemu_state *s,
                                                       int as_index,
                                                       unsigned long vaddr)
{
    return get_ram_page_next_mapping_alloc(s, as_index, vaddr, 0);
}
#define GET_AS(vaddr) ((vaddr >> 1) & 0x7ff)
#define IS_LAST_VADDR(vaddr) ((vaddr & 1) == 0)
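
/* Encoding of a mapping entry (rp->vaddr and the next-mapping slots), as
   implied by the two macros above and their users: the page-aligned virtual
   address is in the high bits, bits [11:1] hold the address space index
   (GET_AS) and bit 0 is set when another mapping follows (IS_LAST_VADDR);
   -1 means "no mapping". */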
/* WARNING: the PTE is not modified */
static void unmap_virtual_ram_page(struct kqemu_state *s,
                                   int as_index,
                                   unsigned long vaddr1)
{
    struct kqemu_ram_page *rp;
    unsigned long *pvaddr, *ppvaddr, vaddr;
    unsigned long page_index;

#ifdef DEBUG_INVALIDATE
    monitor_log(s, "unmap_virtual_ram_page: as=%d vaddr=%p\n",
                as_index, (void *)vaddr1);
#endif
    page_index = mon_get_pte(s, as_index, vaddr1);
    if (page_index == -1)
        return;
    rp = find_ram_page_from_paddr(s, page_index);
    if (!rp)
        return;
    if (rp->vaddr == -1) {
        /* should never happen */
        return;
    }
#ifdef DEBUG_INVALIDATE
    monitor_log(s, "rp->vaddr=%p\n", (void *)rp->vaddr);
#endif
    vaddr1 = vaddr1 | (as_index << 1);
    if (rp->vaddr == vaddr1) {
        /* fast case (no other mappings) */
        rp->vaddr = -1;
        /* remove from mapping list */
        MON_RP_PTR(s, rp->map_prev)->map_next = rp->map_next;
        MON_RP_PTR(s, rp->map_next)->map_prev = rp->map_prev;
        return;
    }
    pvaddr = &rp->vaddr; /* current mapping pointer */
    ppvaddr = NULL; /* previous mapping pointer */
    for(;;) {
        vaddr = *pvaddr;
#ifdef DEBUG_INVALIDATE
        monitor_log(s, "vaddr=%p\n", (void *)vaddr);
#endif
        if ((vaddr & ~1) == vaddr1) {
            if (IS_LAST_VADDR(vaddr)) {
                /* no mapping after : we just modify the last one,
                   i.e. the previous mapping becomes the last one */
                if (!ppvaddr) {
                    *pvaddr = -1; /* no previous mapping */
                } else {
                    *ppvaddr &= ~1;
                }
            } else {
                /* there is a mapping after */
                *pvaddr = *get_ram_page_next_mapping(s, GET_AS(vaddr),
                                                     vaddr & ~0xfff);
            }
            break;
        }
        if (IS_LAST_VADDR(vaddr))
            break;
        ppvaddr = pvaddr;
        pvaddr = get_ram_page_next_mapping(s, GET_AS(vaddr),
                                           vaddr & ~0xfff);
    }
}
/* unmap a ram page (all its mappings are removed) */
static void unmap_ram_page(struct kqemu_state *s,
                           struct kqemu_ram_page *rp)
{
    unsigned long vaddr, addr, k;
    unsigned long *ptep;

    if (rp->vaddr == -1)
        return;
    vaddr = rp->vaddr;
    for(;;) {
#ifdef DEBUG_INVALIDATE
        monitor_log(s, "unmap_ram_page: vaddr=%p\n", (void *)vaddr);
#endif
        addr = vaddr & ~0xfff;
        if ((addr - s->ram_page_cache_base) < RAM_PAGE_CACHE_SIZE * PAGE_SIZE) {
            k = (addr - s->ram_page_cache_base) >> PAGE_SHIFT;
            /* invalidate the soft TLB mapping */
            if (k < SOFT_TLB_SIZE) {
                /* ASSUMPTION: reset the soft TLB entry associated with this slot */
                s->soft_tlb[k].vaddr[0] = -1;
                s->soft_tlb[k].vaddr[1] = -1;
                s->soft_tlb[k].vaddr[2] = -1;
                s->soft_tlb[k].vaddr[3] = -1;
            }
            /* invalidate the ram page cache */
            s->slot_to_ram_addr[k] = -1;
        }
        mon_set_pte(s, GET_AS(vaddr), addr, 0, 0);
        if (IS_LAST_VADDR(vaddr))
            break;
        ptep = get_ram_page_next_mapping(s, GET_AS(vaddr), addr);
        vaddr = *ptep;
    }
    rp->vaddr = -1;
    /* remove from mapping list */
    MON_RP_PTR(s, rp->map_prev)->map_next = rp->map_next;
    MON_RP_PTR(s, rp->map_next)->map_prev = rp->map_prev;
}
/* Note: we use a format close to a real x86 page table. XXX: add more
   physical address bits */
static uint32_t *phys_page_findp(struct kqemu_state *s,
                                 unsigned long page_index, int alloc)
{
    int l1_index, l2_index;
    unsigned long pde, pt_page_index;
    uint32_t *pt_page;
    void *ptr;

    l1_index = (page_index >> 10) & 0x3ff;
    pde = s->phys_to_ram_map_pages[l1_index];
    if (!(pde & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        ptr = mon_alloc_page(s, &pt_page_index);
        if (!ptr)
            return NULL;
        s->phys_to_ram_map_pages[l1_index] =
            (pt_page_index << PAGE_SHIFT) | PG_PRESENT_MASK;
    } else {
        pt_page_index = pde >> PAGE_SHIFT;
    }
    pt_page = page_index_to_addr(s, pt_page_index);
    l2_index = page_index & PTE_MASK;
    return pt_page + l2_index;
}
static inline void map_ram_init(struct kqemu_state *s)
{
    struct kqemu_ram_page *rp_head;

    rp_head = &s->mapped_page_head;
    rp_head->map_next = KER_RP_PTR(s, rp_head);
    rp_head->map_prev = KER_RP_PTR(s, rp_head);
}
static void soft_tlb_flush(struct kqemu_state *s)
{
    int i;

    for(i = 0; i < SOFT_TLB_SIZE; i++) {
        s->soft_tlb[i].vaddr[0] = -1;
        s->soft_tlb[i].vaddr[1] = -1;
        s->soft_tlb[i].vaddr[2] = -1;
        s->soft_tlb[i].vaddr[3] = -1;
    }
}
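
/* The soft TLB is a small set-associative cache: SOFT_TLB_SIZE sets indexed
   by the low bits of the page-aligned virtual address, with four ways per set
   (vaddr[0..3]); -1 marks an empty way. */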
static inline void lock_ram_init(struct kqemu_state *s)
{
    struct kqemu_ram_page *rp_head;

    rp_head = &s->locked_page_head;
    rp_head->lock_next = KER_RP_PTR(s, rp_head);
    rp_head->lock_prev = KER_RP_PTR(s, rp_head);
    s->nb_locked_ram_pages = 0;
}
static inline void soft_tlb_invalidate(struct kqemu_state *s,
                                       unsigned long vaddr)
{
    struct kqemu_soft_tlb_entry *e; /* assumed name of the soft TLB entry type */

    e = &s->soft_tlb[(vaddr >> PAGE_SHIFT) & (SOFT_TLB_SIZE - 1)];
    if (e->vaddr[0] == vaddr ||
        e->vaddr[1] == vaddr ||
        e->vaddr[2] == vaddr ||
        e->vaddr[3] == vaddr) {
        e->vaddr[0] = -1;
        e->vaddr[1] = -1;
        e->vaddr[2] = -1;
        e->vaddr[3] = -1;
    }
}
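
/* tlb_flush(): drop every RAM page mapping of the monitor address spaces and
   reload CR3.  When 'global' is set (and user pages are marked global,
   USE_USER_PG_GLOBAL), CR4.PGE is toggled so that global TLB entries are
   invalidated too. */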
static void tlb_flush(struct kqemu_state *s, int global)
{
    struct kqemu_ram_page *rp, *rp_next;
#ifdef PROFILE_INTERP2
    int64_t ti;
#endif

#ifdef PROFILE_INTERP2
    ti = getclock();
#endif
    for(rp = s->mapped_page_head.map_next;
        rp != KER_RP_PTR(s, &s->mapped_page_head);
        rp = rp_next) {
        rp_next = rp->map_next;
        rp = MON_RP_PTR(s, rp);
        unmap_ram_page(s, rp);
    }

    soft_tlb_flush(s); /* ASSUMPTION: the soft TLB is flushed here as well */

#ifdef USE_USER_PG_GLOBAL
    if (global) {
        unsigned long host_cr4;
        /* flush global pages too: temporarily clearing CR4.PGE invalidates
           global TLB entries, which a plain CR3 reload would leave in place */
        asm volatile("mov %%cr4, %0" : "=r" (host_cr4));
        asm volatile ("mov %0, %%cr4" : : "r" (host_cr4 & ~CR4_PGE_MASK));
        asm volatile ("mov %0, %%cr3" : : "r" (s->monitor_cr3));
        asm volatile ("mov %0, %%cr4" : : "r" (host_cr4));
    } else
#endif
    {
        asm volatile ("mov %0, %%cr3" : : "r" (s->monitor_cr3));
    }
#ifdef PROFILE_INTERP2
    s->tlb_flush_cycles += getclock() - ti;
    s->tlb_flush_count++;
#endif
}
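
/* tlb_flush_page(): flush a single guest page, in both address spaces
   (0 and 1, presumably the kernel and user mappings), then invalidate the
   corresponding soft TLB set. */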
static void tlb_flush_page(struct kqemu_state *s, unsigned long vaddr)
{
#ifdef PROFILE_INTERP2
    int64_t ti;
#endif

#ifdef PROFILE_INTERP2
    ti = getclock();
#endif
    if ((vaddr - s->monitor_vaddr) < MONITOR_MEM_SIZE)
        return; /* assumed: pages of the monitor itself are never flushed here */
    /* flush user and kernel pages */
    unmap_virtual_ram_page(s, 0, vaddr);
    mon_set_pte(s, 0, vaddr, 0, 0);

    unmap_virtual_ram_page(s, 1, vaddr);
    mon_set_pte(s, 1, vaddr, 0, 0);

    soft_tlb_invalidate(s, vaddr);
#ifdef PROFILE_INTERP2
    s->tlb_flush_page_cycles += getclock() - ti;
    s->tlb_flush_page_count++;