/*
 * KQEMU
 *
 * Copyright (C) 2004-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* common code for the kernel and monitor. */

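/* The monitor and the kernel driver map the shared kqemu_state at
   different virtual addresses.  MON_MP_PTR/MON_RP_PTR convert a
   mapped_page or kqemu_ram_page pointer stored in the state (hash
   chains, mapping lists) into one usable in the current build, and
   KER_MP_PTR/KER_RP_PTR convert back before storing.  In the monitor
   build both are the identity; in the kernel build they apply
   monitor_to_kernel_offset, i.e. stored pointers are kept in the
   monitor's address space. */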
#ifdef IN_MONITOR
#define KER_ONLY(x...)
#define MON_MP_PTR(s, x) (x)
#define KER_MP_PTR(s, x) (x)
#define MON_RP_PTR(s, x) (x)
#define KER_RP_PTR(s, x) (x)
#else
#define KER_ONLY(x...) x
#define MON_MP_PTR(s, x) ((struct mapped_page *)((uint8_t *)(x) + (s)->monitor_to_kernel_offset))
#define KER_MP_PTR(s, x) ((struct mapped_page *)((uint8_t *)(x) - (s)->monitor_to_kernel_offset))
#define MON_RP_PTR(s, x) ((struct kqemu_ram_page *)((uint8_t *)(x) + (s)->monitor_to_kernel_offset))
#define KER_RP_PTR(s, x) ((struct kqemu_ram_page *)((uint8_t *)(x) - (s)->monitor_to_kernel_offset))
#endif

/* update the segment cache in the cpu state from the real segment cpu
   cache (we use the LDT and GDT descriptors) */
static inline void reload_seg_cache(struct kqemu_state *s, int seg_reg,
                                    uint16_t selector)
{
    struct kqemu_segment_cache *sc;
    uint32_t e1, e2, sel;
    uint8_t *ptr;

#ifdef USE_SEG_GP
    if (s->cpu_state.cpl != 3) {
        uint32_t sel1;
        sel1 = selector | 3;
        if (sel1 != 3) {
            /* XXX: set DPL correctly */
            if (sel1 == s->regs1.cs_sel || sel1 == s->regs1.ss_sel) {
                sel = (selector & ~7) | ((selector & 4) << 14);
                ptr = (uint8_t *)s->dt_table + sel;
                e1 = *(uint32_t *)(ptr);
                e2 = *(uint32_t *)(ptr + 4);
            } else {
                e1 = s->seg_desc_cache[seg_reg][0];
                e2 = s->seg_desc_cache[seg_reg][1];
            }
        } else {
            e1 = 0;
            e2 = 0;
        }
    } else
#endif
    {
        /* the CPL=3 DT table is not modified */
        sel = (selector & ~7) | ((selector & 4) << 14);
        ptr = (uint8_t *)s->dt_table + ((NB_DT_TABLES - 1) << 17) + sel;
        e1 = *(uint32_t *)(ptr);
        e2 = *(uint32_t *)(ptr + 4);
    }
    sc = &s->cpu_state.segs[seg_reg];
    if (seg_reg == R_CS || seg_reg == R_SS)
        selector = (selector & ~3) | s->cpu_state.cpl;
    sc->selector = selector;
    sc->flags = e2;
#ifdef __x86_64__
    if (seg_reg >= R_FS) {
        /* do nothing: the base is always loaded before with the
           FSBASE and GSBASE MSRs */
    } else
#endif
        sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
}

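/* Rebuild the guest-visible CPU state from an exception register frame:
   copy back the general purpose registers and eip, merge the
   virtualized eflags bits from the communication page, and refresh the
   segment caches from the current selectors. */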
void restore_cpu_state_from_regs(struct kqemu_state *s,
                                 struct kqemu_exception_regs *r)
{
    struct kqemu_cpu_state *env = &s->cpu_state;
#ifdef __x86_64__
    env->regs[R_EAX] = r->eax;
    env->regs[R_ECX] = r->ecx;
    env->regs[R_EDX] = r->edx;
    env->regs[R_EBX] = r->ebx;
    env->regs[R_ESP] = r->esp;
    env->regs[R_EBP] = r->ebp;
    env->regs[R_ESI] = r->esi;
    env->regs[R_EDI] = r->edi;
    env->regs[8] = r->r8;
    env->regs[9] = r->r9;
    env->regs[10] = r->r10;
    env->regs[11] = r->r11;
    env->regs[12] = r->r12;
    env->regs[13] = r->r13;
    env->regs[14] = r->r14;
    env->regs[15] = r->r15;
    env->eip = r->eip;
    env->eflags = (s->comm_page.virt_eflags & EFLAGS_MASK) | (r->eflags & ~EFLAGS_MASK);

    reload_seg_cache(s, R_CS, r->cs_sel);
    reload_seg_cache(s, R_SS, r->ss_sel);
    reload_seg_cache(s, R_DS, env->segs[R_DS].selector);
    reload_seg_cache(s, R_ES, env->segs[R_ES].selector);
#else
    env->regs[R_EAX] = r->eax;
    env->regs[R_ECX] = r->ecx;
    env->regs[R_EDX] = r->edx;
    env->regs[R_EBX] = r->ebx;
    env->regs[R_ESP] = r->esp;
    env->regs[R_EBP] = r->ebp;
    env->regs[R_ESI] = r->esi;
    env->regs[R_EDI] = r->edi;
    env->eip = r->eip;
    env->eflags = (s->comm_page.virt_eflags & EFLAGS_MASK) | (r->eflags & ~EFLAGS_MASK);

    reload_seg_cache(s, R_CS, r->cs_sel);
    reload_seg_cache(s, R_SS, r->ss_sel);
    reload_seg_cache(s, R_DS, r->ds_sel);
    reload_seg_cache(s, R_ES, r->es_sel);
#endif
    reload_seg_cache(s, R_FS, env->segs[R_FS].selector);
    reload_seg_cache(s, R_GS, env->segs[R_GS].selector);
}

#if 0
static void free_vaddr(struct kqemu_state *s, unsigned long vaddr)
{
    unsigned long page_index;
    page_index = (vaddr - s->monitor_vaddr) >> PAGE_SHIFT;
    if (page_index >= MAX_MAPPED_PAGES)
        return;
    s->mapped_pages[page_index].next = s->first_mapped_page;
    s->first_mapped_page = page_index;
}
#endif

/* return a new virtual address suitable to map a page in it, or -1 if
   no virtual address is available */
static unsigned long get_vaddr(struct kqemu_state *s)
{
    int page_index;
    page_index = s->first_mapped_page;
    if (page_index == -1)
        return -1;
    s->first_mapped_page = s->mapped_pages[page_index].next;
    return s->monitor_vaddr + ((unsigned long)page_index << PAGE_SHIFT);
}

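/* Mapped page tracking: each page mapped into the monitor address space
   has a struct mapped_page recording its page index and host page; the
   entries are chained in a small hash table keyed by page index so the
   monitor virtual address (or the kernel address of the host page) can
   be found again quickly. */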
static inline unsigned int page_index_hash_func(unsigned long page_index)
{
    return (page_index ^ (page_index >> MAPPED_PAGES_HASH_BITS)) &
        (MAPPED_PAGES_HASH_SIZE - 1);
}

static inline void *page_index_to_virt(struct kqemu_state *s,
                                       unsigned long page_index)
{
    struct mapped_page *p;
    p = s->mapped_pages_hash[page_index_hash_func(page_index)];
    for(;;) {
        if (!p)
            return NULL;
        p = MON_MP_PTR(s, p);
        if (p->page_index == page_index)
            return (void *)(((p - s->mapped_pages) << PAGE_SHIFT) +
                            s->monitor_vaddr);
        p = p->hash_next;
    }
}

static inline void *page_index_to_kaddr(struct kqemu_state *s,
                                        unsigned long page_index)
{
    struct mapped_page *p;
    p = s->mapped_pages_hash[page_index_hash_func(page_index)];
    for(;;) {
        if (!p)
            return NULL;
        p = MON_MP_PTR(s, p);
        if (p->page_index == page_index)
            return kqemu_page_kaddr(p->host_page);
        p = p->hash_next;
    }
}

static inline void set_vaddr_page_index(struct kqemu_state *s,
                                        unsigned long vaddr,
                                        unsigned long page_index,
                                        void *host_page,
                                        int is_user)
{
    struct mapped_page *p, **ph;
    p = &s->mapped_pages[(vaddr - s->monitor_vaddr) >> PAGE_SHIFT];
    p->page_index = page_index;
    p->host_page = host_page;
    p->user_page = is_user;
    ph = &s->mapped_pages_hash[page_index_hash_func(page_index)];
    p->hash_next = *ph;
    *ph = KER_MP_PTR(s, p);
}

/* PTE access */

#ifdef IN_MONITOR
#define page_index_to_addr(s, x) page_index_to_virt(s, x)
#else
#define page_index_to_addr(s, x) page_index_to_kaddr(s, x)
#endif

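/* The helpers below walk the page tables of address space as_index
   (s->pgds[as_index]) down to the PTE for vaddr, or only down to the
   PDE when alloc == 2, allocating any missing intermediate page table
   page with mon_alloc_page() when alloc is non zero.  In the kernel
   build they can also return, through pvptep, the monitor virtual
   address of the PTE. */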
#ifdef __x86_64__

/* alloc = 0 : do not allocate PTEs
         1 : allocate up to PTE page
         2 : allocate up to PDE page
*/
/* PAE x86_64 case */
static inline uint64_t *mon_get_ptep_l3(struct kqemu_state *s,
                                        int as_index, unsigned long vaddr,
                                        int alloc KER_ONLY(, unsigned long *pvptep))
{
    int pml4e_index, pdpe_index, pde_index, pte_index;
    unsigned long pdp_page_index, pde_page_index, pte_page_index;
    uint64_t pml4e, pdpe, pde;
    uint64_t *pgd_page, *pdp_page, *pde_page, *pte_page;
    void *ptr;

    pgd_page = s->pgds[as_index].l4;
    pml4e_index = (vaddr >> 39) & 0x1ff;
    pml4e = pgd_page[pml4e_index];
    if (!(pml4e & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        /* allocate a new page */
        ptr = mon_alloc_page(s, &pdp_page_index);
        if (!ptr)
            return NULL;
        pgd_page[pml4e_index] = ((uint64_t)pdp_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pdp_page_index = pml4e >> PAGE_SHIFT;
    }
    pdp_page = page_index_to_addr(s, pdp_page_index);

    pdpe_index = (vaddr >> 30) & 0x1ff;
    pdpe = pdp_page[pdpe_index];
    if (!(pdpe & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        ptr = mon_alloc_page(s, &pde_page_index);
        if (!ptr)
            return NULL;
        pdp_page[pdpe_index] = ((uint64_t)pde_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pde_page_index = pdpe >> PAGE_SHIFT;
    }
    pde_page = page_index_to_addr(s, pde_page_index);

    pde_index = (vaddr >> 21) & 0x1ff;
    if (alloc == 2)
        return pde_page + pde_index;
    pde = pde_page[pde_index];
    if (!(pde & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        ptr = mon_alloc_page(s, &pte_page_index);
        if (!ptr)
            return NULL;
        pde_page[pde_index] = ((uint64_t)pte_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pte_page_index = pde >> PAGE_SHIFT;
    }
    pte_page = page_index_to_addr(s, pte_page_index);

    pte_index = (vaddr >> 12) & 0x1ff;
#ifndef IN_MONITOR
    if (pvptep) {
        *pvptep = (unsigned long)((uint64_t *)page_index_to_virt(s, pte_page_index) + pte_index);
    }
#endif
    return pte_page + pte_index;
}

/* stub, just to avoid #ifdefs in the callers */
static inline uint32_t *mon_get_ptep_l2(struct kqemu_state *s,
                                        int as_index, unsigned long vaddr,
                                        int alloc KER_ONLY(, unsigned long *pvptep))
{
    return NULL;
}

#else
/* PAE case */
static inline uint64_t *mon_get_ptep_l3(struct kqemu_state *s,
                                        int as_index, unsigned long vaddr,
                                        int alloc KER_ONLY(, unsigned long *pvptep))
{
    int pdpe_index, pde_index, pte_index;
    unsigned long pde_page_index, pte_page_index;
    uint64_t pdpe, pde;
    uint64_t *pgd_page, *pde_page, *pte_page;
    void *ptr;

    pgd_page = s->pgds[as_index].l3;
    pdpe_index = vaddr >> 30;
    pdpe = pgd_page[pdpe_index];
    if (!(pdpe & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        /* allocate a new page */
        ptr = mon_alloc_page(s, &pde_page_index);
        if (!ptr)
            return NULL;
        /* no other bit must be set, otherwise GPF */
        pgd_page[pdpe_index] = ((uint64_t)pde_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK;
    } else {
        pde_page_index = pdpe >> PAGE_SHIFT;
    }
    pde_page = page_index_to_addr(s, pde_page_index);

    pde_index = (vaddr >> 21) & 0x1ff;
    if (alloc == 2)
        return pde_page + pde_index;
    pde = pde_page[pde_index];
    if (!(pde & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        ptr = mon_alloc_page(s, &pte_page_index);
        if (!ptr)
            return NULL;
        pde_page[pde_index] = ((uint64_t)pte_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pte_page_index = pde >> PAGE_SHIFT;
    }
    pte_page = page_index_to_addr(s, pte_page_index);

    pte_index = (vaddr >> 12) & 0x1ff;
#ifndef IN_MONITOR
    if (pvptep) {
        *pvptep = (unsigned long)((uint64_t *)page_index_to_virt(s, pte_page_index) + pte_index);
    }
#endif
    return pte_page + pte_index;
}

/* legacy case */
static inline uint32_t *mon_get_ptep_l2(struct kqemu_state *s,
                                        int as_index, unsigned long vaddr,
                                        int alloc KER_ONLY(, unsigned long *pvptep))
{
    int pde_index, pte_index;
    unsigned long pte_page_index;
    uint32_t pde;
    uint32_t *pgd_page, *pte_page;
    void *ptr;

    pgd_page = s->pgds[as_index].l2;
    pde_index = vaddr >> PGD_SHIFT;
    if (alloc == 2)
        return pgd_page + pde_index;
    pde = pgd_page[pde_index];
    if (!(pde & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        /* allocate a new page */
        ptr = mon_alloc_page(s, &pte_page_index);
        if (!ptr)
            return NULL;
        pgd_page[pde_index] = (pte_page_index << PAGE_SHIFT) |
            PG_PRESENT_MASK | PG_RW_MASK | PG_USER_MASK;
    } else {
        pte_page_index = pde >> PAGE_SHIFT;
    }
    pte_page = page_index_to_addr(s, pte_page_index);
    pte_index = (vaddr >> PAGE_SHIFT) & PTE_MASK;
#ifndef IN_MONITOR
    if (pvptep) {
        *pvptep = (unsigned long)((uint32_t *)page_index_to_virt(s, pte_page_index) + pte_index);
    }
#endif
    return pte_page + pte_index;
}
#endif

#ifdef IN_MONITOR
static unsigned long mon_get_pte(struct kqemu_state *s,
                                 int as_index, unsigned long vaddr)
{
    if (USE_PAE(s)) {
        uint64_t *ptep, pte;
        ptep = mon_get_ptep_l3(s, as_index, vaddr, 0 KER_ONLY(, NULL));
        if (!ptep)
            return -1;
        pte = *ptep;
        if (!(pte & PG_PRESENT_MASK))
            return -1;
        return pte >> PAGE_SHIFT;
    } else {
        uint32_t *ptep, pte;
        ptep = mon_get_ptep_l2(s, as_index, vaddr, 0 KER_ONLY(, NULL));
        if (!ptep)
            return -1;
        pte = *ptep;
        if (!(pte & PG_PRESENT_MASK))
            return -1;
        return pte >> PAGE_SHIFT;
    }
}
#endif

/* RAM page handling */

static inline unsigned int ram_page_hash_func(unsigned long page_index)
{
    return (page_index ^ (page_index >> RAM_PAGE_HASH_BITS)) &
        (RAM_PAGE_HASH_SIZE - 1);
}

static inline struct kqemu_ram_page *
find_ram_page_from_paddr(struct kqemu_state *s,
                         unsigned long paddr)
{
    struct kqemu_ram_page *rp;
    rp = s->ram_page_hash[ram_page_hash_func(paddr)];
    while (rp != NULL) {
        rp = MON_RP_PTR(s, rp);
        if (rp->paddr == paddr)
            return rp;
        rp = rp->hash_next;
    }
    return NULL;
}

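/* A RAM page can be mapped at several guest virtual addresses.  The
   monitor-only helpers below maintain, per address space, a page table
   like structure (s->ram_page_mappings) that stores for each mapped
   virtual page the encoded address of the next mapping of the same RAM
   page (see GET_AS / IS_LAST_VADDR below). */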
#ifdef IN_MONITOR

#ifdef __x86_64__
static unsigned long *get_ram_page_next_mapping_alloc(struct kqemu_state *s,
                                                      int as_index,
                                                      unsigned long vaddr,
                                                      int alloc)
{
    int pml4e_index, pdpe_index, pde_index, pte_index;
    unsigned long ***pml4e, **pdpe, *pde;

    pml4e_index = (vaddr >> 39) & 0x1ff;
    pml4e = s->ram_page_mappings[as_index][pml4e_index];
    if (!pml4e) {
        if (!alloc)
            return NULL;
        pml4e = mon_alloc_page(s, NULL);
        if (!pml4e)
            return NULL;
        s->ram_page_mappings[as_index][pml4e_index] = pml4e;
    }

    pdpe_index = (vaddr >> 30) & 0x1ff;
    pdpe = pml4e[pdpe_index];
    if (!pdpe) {
        if (!alloc)
            return NULL;
        pdpe = mon_alloc_page(s, NULL);
        if (!pdpe)
            return NULL;
        pml4e[pdpe_index] = pdpe;
    }

    pde_index = (vaddr >> 21) & 0x1ff;
    pde = pdpe[pde_index];
    if (!pde) {
        if (!alloc)
            return NULL;
        pde = mon_alloc_page(s, NULL);
        if (!pde)
            return NULL;
        pdpe[pde_index] = pde;
    }

    pte_index = (vaddr >> 12) & 0x1ff;
    return pde + pte_index;
}
#else
static unsigned long *get_ram_page_next_mapping_alloc(struct kqemu_state *s,
                                                      int as_index,
                                                      unsigned long vaddr,
                                                      int alloc)
{
    int pgd_index;
    unsigned long *ptep;
    pgd_index = vaddr >> PGD_SHIFT;
    ptep = s->ram_page_mappings[as_index][pgd_index];
    if (!ptep) {
        if (!alloc)
            return NULL;
        ptep = mon_alloc_page(s, NULL);
        if (!ptep)
            return NULL;
        s->ram_page_mappings[as_index][pgd_index] = ptep;
    }
    ptep += (vaddr >> PAGE_SHIFT) & PTE_MASK;
    return ptep;
}
#endif

static inline unsigned long *get_ram_page_next_mapping(struct kqemu_state *s,
                                                       int as_index,
                                                       unsigned long vaddr)
{
    return get_ram_page_next_mapping_alloc(s, as_index, vaddr, 0);
}

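/* Encoding of the virtual addresses stored in rp->vaddr and in the
   next-mapping words: bits 12 and up hold the page aligned virtual
   address, bits 1..11 the address space index, and bit 0 is set when a
   further mapping of the same RAM page follows. */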
#define GET_AS(vaddr) ((vaddr >> 1) & 0x7ff)
#define IS_LAST_VADDR(vaddr) ((vaddr & 1) == 0)

/* WARNING: the PTE is not modified */
static void unmap_virtual_ram_page(struct kqemu_state *s,
                                   int as_index,
                                   unsigned long vaddr1)
{
    struct kqemu_ram_page *rp;
    unsigned long *pvaddr, *ppvaddr, vaddr;
    unsigned long page_index;

#ifdef DEBUG_INVALIDATE
    monitor_log(s, "unmap_virtual_ram_page: as=%d vaddr=%p\n",
                as_index, (void *)vaddr1);
#endif
    page_index = mon_get_pte(s, as_index, vaddr1);
    if (page_index == -1)
        return;
    rp = find_ram_page_from_paddr(s, page_index);
    if (!rp) {
        return;
    }
    /* should never happen */
    if (rp->vaddr == -1)
        return;
#ifdef DEBUG_INVALIDATE
    monitor_log(s, "rp->vaddr=%p\n", (void *)rp->vaddr);
#endif
    vaddr1 = vaddr1 | (as_index << 1);
    if (rp->vaddr == vaddr1) {
        /* fast case (no other mappings) */
        rp->vaddr = -1;

        /* remove from mapping list */
        MON_RP_PTR(s, rp->map_prev)->map_next = rp->map_next;
        MON_RP_PTR(s, rp->map_next)->map_prev = rp->map_prev;
    } else {
        /* slow case */
        pvaddr = &rp->vaddr; /* current mapping pointer */
        ppvaddr = NULL; /* previous mapping pointer */
        for(;;) {
            vaddr = *pvaddr;
#ifdef DEBUG_INVALIDATE
            monitor_log(s, "vaddr=%p\n", (void *)vaddr);
#endif
            if ((vaddr & ~1) == vaddr1) {
                if (IS_LAST_VADDR(vaddr)) {
                    /* no mapping after: we just modify the last one,
                       if any */
                    if (!ppvaddr)
                        *pvaddr = -1; /* no previous mapping */
                    else
                        *ppvaddr &= ~1;
                } else {
                    /* there is a mapping after */
                    *pvaddr = *get_ram_page_next_mapping(s, GET_AS(vaddr),
                                                         vaddr & ~0xfff);
                }
                break;
            }
            if (IS_LAST_VADDR(vaddr))
                break;
            ppvaddr = pvaddr;
            pvaddr = get_ram_page_next_mapping(s, GET_AS(vaddr),
                                               vaddr & ~0xfff);
        }
    }
}

/* unmap a ram page (all its mappings are suppressed) */
static void unmap_ram_page(struct kqemu_state *s,
                           struct kqemu_ram_page *rp)
{
    unsigned long vaddr, addr, k;
    unsigned long *ptep;

    if (rp->vaddr == -1)
        return;
    vaddr = rp->vaddr;
    for(;;) {
#ifdef DEBUG_INVALIDATE
        monitor_log(s, "unmap_ram_page: vaddr=%p\n", (void *)vaddr);
#endif
        addr = vaddr & ~0xfff;
        if ((addr - s->ram_page_cache_base) < RAM_PAGE_CACHE_SIZE * PAGE_SIZE) {
            k = (addr - s->ram_page_cache_base) >> PAGE_SHIFT;
            /* invalidate the soft TLB mapping */
            if (k < SOFT_TLB_SIZE) {
                TLBEntry *e;
                e = &s->soft_tlb[k];
                e->vaddr[0] = -1;
                e->vaddr[1] = -1;
                e->vaddr[2] = -1;
                e->vaddr[3] = -1;
            }
            /* invalidate the ram page cache */
            s->slot_to_ram_addr[k] = -1;
        }
        mon_set_pte(s, GET_AS(vaddr), addr, 0, 0);
        if (IS_LAST_VADDR(vaddr))
            break;
        ptep = get_ram_page_next_mapping(s, GET_AS(vaddr), addr);
        vaddr = *ptep;
    }
    rp->vaddr = -1;

    /* remove from mapping list */
    MON_RP_PTR(s, rp->map_prev)->map_next = rp->map_next;
    MON_RP_PTR(s, rp->map_next)->map_prev = rp->map_prev;
}
#endif

/* Note: we use a format close to a real x86 page table. XXX: add more
   physical address bits */
static uint32_t *phys_page_findp(struct kqemu_state *s,
                                 unsigned long page_index, int alloc)
{
    int l1_index, l2_index;
    unsigned long pde, pt_page_index;
    uint32_t *pt_page;
    void *ptr;

    l1_index = (page_index >> 10) & 0x3ff;
    pde = s->phys_to_ram_map_pages[l1_index];
    if (!(pde & PG_PRESENT_MASK)) {
        if (!alloc)
            return NULL;
        ptr = mon_alloc_page(s, &pt_page_index);
        if (!ptr)
            return NULL;
        s->phys_to_ram_map_pages[l1_index] =
            (pt_page_index << PAGE_SHIFT) | PG_PRESENT_MASK;
    } else {
        pt_page_index = pde >> PAGE_SHIFT;
    }
    pt_page = page_index_to_addr(s, pt_page_index);
    l2_index = page_index & PTE_MASK;
    return pt_page + l2_index;
}

static inline void map_ram_init(struct kqemu_state *s)
{
    struct kqemu_ram_page *rp_head;
    rp_head = &s->mapped_page_head;
    rp_head->map_next = KER_RP_PTR(s, rp_head);
    rp_head->map_prev = KER_RP_PTR(s, rp_head);
}

static void soft_tlb_flush(struct kqemu_state *s)
{
    int i;
    for(i = 0; i < SOFT_TLB_SIZE; i++) {
        s->soft_tlb[i].vaddr[0] = -1;
        s->soft_tlb[i].vaddr[1] = -1;
        s->soft_tlb[i].vaddr[2] = -1;
        s->soft_tlb[i].vaddr[3] = -1;
    }
}

#ifndef IN_MONITOR
static inline void lock_ram_init(struct kqemu_state *s)
{
    struct kqemu_ram_page *rp_head;
    rp_head = &s->locked_page_head;
    rp_head->lock_next = KER_RP_PTR(s, rp_head);
    rp_head->lock_prev = KER_RP_PTR(s, rp_head);
    s->nb_locked_ram_pages = 0;
}
#endif

#ifdef IN_MONITOR
static inline void soft_tlb_invalidate(struct kqemu_state *s,
                                       unsigned long vaddr)
{
    TLBEntry *e;
    vaddr &= PAGE_MASK;
    e = &s->soft_tlb[(vaddr >> PAGE_SHIFT) & (SOFT_TLB_SIZE - 1)];
    if (e->vaddr[0] == vaddr ||
        e->vaddr[1] == vaddr ||
        e->vaddr[2] == vaddr ||
        e->vaddr[3] == vaddr) {
        e->vaddr[0] = -1;
        e->vaddr[1] = -1;
        e->vaddr[2] = -1;
        e->vaddr[3] = -1;
    }
}

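/* Flush all guest mappings: unmap every currently mapped RAM page,
   reset the soft TLB, and reload CR3 to drop the hardware TLB
   (temporarily clearing CR4.PGE when global pages are used, so that
   global entries are flushed too). */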
static void tlb_flush(struct kqemu_state *s, int global)
{
    struct kqemu_ram_page *rp, *rp_next;
#ifdef PROFILE_INTERP2
    int64_t ti;
#endif

#ifdef PROFILE_INTERP2
    ti = getclock();
#endif
    for(rp = s->mapped_page_head.map_next;
        rp != KER_RP_PTR(s, &s->mapped_page_head);
        rp = rp_next) {
        rp_next = rp->map_next;
        rp = MON_RP_PTR(s, rp);
        unmap_ram_page(s, rp);
    }
    /* init list */
    map_ram_init(s);
    soft_tlb_flush(s);
#ifdef IN_MONITOR
#ifdef USE_USER_PG_GLOBAL
    if (PG_GLOBAL(s)) {
        unsigned long host_cr4;
        /* flush global pages too */
        asm volatile("mov %%cr4, %0" : "=r" (host_cr4));
        asm volatile ("mov %0, %%cr4" : : "r" (host_cr4 & ~CR4_PGE_MASK));
        asm volatile ("mov %0, %%cr3" : : "r" (s->monitor_cr3));
        asm volatile ("mov %0, %%cr4" : : "r" (host_cr4));
    } else
#endif
    {
        asm volatile ("mov %0, %%cr3" : : "r" (s->monitor_cr3));
    }
#endif
#ifdef PROFILE_INTERP2
    s->tlb_flush_cycles += getclock() - ti;
    s->tlb_flush_count++;
#endif
}

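/* Flush a single guest page: drop its user and kernel space mappings,
   clear the corresponding PTEs in both address spaces and invalidate
   the soft TLB entry.  Addresses inside the monitor area are left
   untouched. */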
static void tlb_flush_page(struct kqemu_state *s, unsigned long vaddr)
{
#ifdef PROFILE_INTERP2
    int64_t ti;
#endif

#ifdef PROFILE_INTERP2
    ti = getclock();
#endif
    vaddr &= PAGE_MASK;
    if ((vaddr - s->monitor_vaddr) < MONITOR_MEM_SIZE)
        return;
    /* flush user and kernel pages */
    unmap_virtual_ram_page(s, 0, vaddr);
    mon_set_pte(s, 0, vaddr, 0, 0);

    unmap_virtual_ram_page(s, 1, vaddr);
    mon_set_pte(s, 1, vaddr, 0, 0);

    soft_tlb_invalidate(s, vaddr);
#ifdef PROFILE_INTERP2
    s->tlb_flush_page_cycles += getclock() - ti;
    s->tlb_flush_page_count++;
#endif
}

#endif