kvm: libkvm: remove user_alloc field from slot representation
[qemu-kvm/fedora.git] / exec.c
blob bf037f0ffa9169b9b5fa3a9ed49b811daba416c5
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #if defined(CONFIG_USER_ONLY)
47 #include <qemu.h>
48 #endif
50 //#define DEBUG_TB_INVALIDATE
51 //#define DEBUG_FLUSH
52 //#define DEBUG_TLB
53 //#define DEBUG_UNASSIGNED
55 /* make various TB consistency checks */
56 //#define DEBUG_TB_CHECK
57 //#define DEBUG_TLB_CHECK
59 //#define DEBUG_IOPORT
60 //#define DEBUG_SUBPAGE
62 #if !defined(CONFIG_USER_ONLY)
63 /* TB consistency checks only implemented for usermode emulation. */
64 #undef DEBUG_TB_CHECK
65 #endif
67 #define SMC_BITMAP_USE_THRESHOLD 10
69 #define MMAP_AREA_START 0x00000000
70 #define MMAP_AREA_END 0xa8000000
72 #if defined(TARGET_SPARC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 41
74 #elif defined(TARGET_SPARC)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 36
76 #elif defined(TARGET_ALPHA)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #define TARGET_VIRT_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_PPC64)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 36
85 #else
86 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
87 #define TARGET_PHYS_ADDR_SPACE_BITS 32
88 #endif
90 TranslationBlock *tbs;
91 int code_gen_max_blocks;
92 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
93 int nb_tbs;
94 /* any access to the tbs or the page table must use this lock */
95 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
97 #if defined(__arm__) || defined(__sparc_v9__)
98 /* The prologue must be reachable with a direct jump. ARM and Sparc64
99 have limited branch ranges (possibly also PPC) so place it in a
100 section close to the code segment. */
101 #define code_gen_section \
102 __attribute__((__section__(".gen_code"))) \
103 __attribute__((aligned (32)))
104 #else
105 #define code_gen_section \
106 __attribute__((aligned (32)))
107 #endif
109 uint8_t code_gen_prologue[1024] code_gen_section;
110 uint8_t *code_gen_buffer;
111 unsigned long code_gen_buffer_size;
112 /* threshold to flush the translated code buffer */
113 unsigned long code_gen_buffer_max_size;
114 uint8_t *code_gen_ptr;
116 #if !defined(CONFIG_USER_ONLY)
117 ram_addr_t phys_ram_size;
118 int phys_ram_fd;
119 uint8_t *phys_ram_base;
120 uint8_t *phys_ram_dirty;
121 uint8_t *bios_mem;
122 static int in_migration;
123 static ram_addr_t phys_ram_alloc_offset = 0;
124 #endif
126 CPUState *first_cpu;
127 /* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
129 CPUState *cpu_single_env;
130 /* 0 = Do not count executed instructions.
131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
133 int use_icount = 0;
134 /* Current instruction counter. While executing translated code this may
135 include some instructions that have not yet been executed. */
136 int64_t qemu_icount;
138 typedef struct PageDesc {
139 /* list of TBs intersecting this ram page */
140 TranslationBlock *first_tb;
141 /* in order to optimize self-modifying code handling, we count the number
142 of writes to a given page before switching to a bitmap */
143 unsigned int code_write_count;
144 uint8_t *code_bitmap;
145 #if defined(CONFIG_USER_ONLY)
146 unsigned long flags;
147 #endif
148 } PageDesc;
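/* NOTE: one PageDesc exists per target page that contains translated code.
   first_tb chains all TBs located in the page (the low bits of the pointer
   encode which of a TB's two pages this is), while code_write_count and
   code_bitmap implement the self-modifying-code heuristic driven by
   SMC_BITMAP_USE_THRESHOLD above. */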
150 typedef struct PhysPageDesc {
151 /* offset in host memory of the page + io_index in the low bits */
152 ram_addr_t phys_offset;
153 } PhysPageDesc;
155 #define L2_BITS 10
156 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
157 /* XXX: this is a temporary hack for alpha target.
158 * In the future, this is to be replaced by a multi-level table
159 * to actually be able to handle the complete 64-bit address space. */
161 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
162 #else
163 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
164 #endif
166 #define L1_SIZE (1 << L1_BITS)
167 #define L2_SIZE (1 << L2_BITS)
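/* NOTE: page descriptors are kept in a two-level table: for a page index
   (address >> TARGET_PAGE_BITS), l1_map[index >> L2_BITS] points to an array
   of L2_SIZE PageDesc entries, and (index & (L2_SIZE - 1)) selects the entry
   within it (see page_find_alloc() and phys_page_find_alloc() below). */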
169 unsigned long qemu_real_host_page_size;
170 unsigned long qemu_host_page_bits;
171 unsigned long qemu_host_page_size;
172 unsigned long qemu_host_page_mask;
174 /* XXX: for system emulation, it could just be an array */
175 static PageDesc *l1_map[L1_SIZE];
176 PhysPageDesc **l1_phys_map;
178 #if !defined(CONFIG_USER_ONLY)
179 static void io_mem_init(void);
181 /* io memory support */
182 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
183 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
184 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
185 char io_mem_used[IO_MEM_NB_ENTRIES];
186 static int io_mem_watch;
187 #endif
189 /* log support */
190 char *logfilename = "/tmp/qemu.log";
191 FILE *logfile;
192 int loglevel;
193 static int log_append = 0;
195 /* statistics */
196 static int tlb_flush_count;
197 static int tb_flush_count;
198 static int tb_phys_invalidate_count;
200 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
201 typedef struct subpage_t {
202 target_phys_addr_t base;
203 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
204 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
205 void *opaque[TARGET_PAGE_SIZE][2][4];
206 } subpage_t;
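/* NOTE: a subpage_t is installed when a single target page is split between
   several memory regions (e.g. RAM adjacent to MMIO): it records, per byte
   offset within the page (SUBPAGE_IDX) and per access size, which read/write
   handlers and opaque pointer to dispatch to (see subpage_register() below). */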
208 #ifdef _WIN32
209 static void map_exec(void *addr, long size)
211 DWORD old_protect;
212 VirtualProtect(addr, size,
213 PAGE_EXECUTE_READWRITE, &old_protect);
216 #else
217 static void map_exec(void *addr, long size)
219 unsigned long start, end, page_size;
221 page_size = getpagesize();
222 start = (unsigned long)addr;
223 start &= ~(page_size - 1);
225 end = (unsigned long)addr + size;
226 end += page_size - 1;
227 end &= ~(page_size - 1);
229 mprotect((void *)start, end - start,
230 PROT_READ | PROT_WRITE | PROT_EXEC);
232 #endif
234 static void page_init(void)
236 /* NOTE: we can always assume that qemu_host_page_size >=
237 TARGET_PAGE_SIZE */
238 #ifdef _WIN32
240 SYSTEM_INFO system_info;
241 DWORD old_protect;
243 GetSystemInfo(&system_info);
244 qemu_real_host_page_size = system_info.dwPageSize;
246 #else
247 qemu_real_host_page_size = getpagesize();
248 #endif
249 if (qemu_host_page_size == 0)
250 qemu_host_page_size = qemu_real_host_page_size;
251 if (qemu_host_page_size < TARGET_PAGE_SIZE)
252 qemu_host_page_size = TARGET_PAGE_SIZE;
253 qemu_host_page_bits = 0;
254 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
255 qemu_host_page_bits++;
256 qemu_host_page_mask = ~(qemu_host_page_size - 1);
257 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
258 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
260 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
262 long long startaddr, endaddr;
263 FILE *f;
264 int n;
266 mmap_lock();
267 last_brk = (unsigned long)sbrk(0);
268 f = fopen("/proc/self/maps", "r");
269 if (f) {
270 do {
271 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
272 if (n == 2) {
273 startaddr = MIN(startaddr,
274 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
275 endaddr = MIN(endaddr,
276 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
277 page_set_flags(startaddr & TARGET_PAGE_MASK,
278 TARGET_PAGE_ALIGN(endaddr),
279 PAGE_RESERVED);
281 } while (!feof(f));
282 fclose(f);
284 mmap_unlock();
286 #endif
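/* NOTE: in user-mode emulation, the /proc/self/maps scan above marks every
   range already mapped in the host process (the emulator binary, its
   libraries, the heap) as PAGE_RESERVED, so the guest mmap emulation will not
   hand those ranges out to the guest. */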
289 static inline PageDesc *page_find_alloc(target_ulong index)
291 PageDesc **lp, *p;
293 #if TARGET_LONG_BITS > 32
294 /* Host memory outside guest VM. For 32-bit targets we have already
295 excluded high addresses. */
296 if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
297 return NULL;
298 #endif
299 lp = &l1_map[index >> L2_BITS];
300 p = *lp;
301 if (!p) {
302 /* allocate if not found */
303 #if defined(CONFIG_USER_ONLY)
304 unsigned long addr;
305 size_t len = sizeof(PageDesc) * L2_SIZE;
306 /* Don't use qemu_malloc because it may recurse. */
307 p = mmap(0, len, PROT_READ | PROT_WRITE,
308 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
309 *lp = p;
310 addr = h2g(p);
311 if (addr == (target_ulong)addr) {
312 page_set_flags(addr & TARGET_PAGE_MASK,
313 TARGET_PAGE_ALIGN(addr + len),
314 PAGE_RESERVED);
316 #else
317 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
318 *lp = p;
319 #endif
321 return p + (index & (L2_SIZE - 1));
324 static inline PageDesc *page_find(target_ulong index)
326 PageDesc *p;
328 p = l1_map[index >> L2_BITS];
329 if (!p)
330 return 0;
331 return p + (index & (L2_SIZE - 1));
334 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
336 void **lp, **p;
337 PhysPageDesc *pd;
339 p = (void **)l1_phys_map;
340 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
342 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
343 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
344 #endif
345 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
346 p = *lp;
347 if (!p) {
348 /* allocate if not found */
349 if (!alloc)
350 return NULL;
351 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
352 memset(p, 0, sizeof(void *) * L1_SIZE);
353 *lp = p;
355 #endif
356 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
357 pd = *lp;
358 if (!pd) {
359 int i;
360 /* allocate if not found */
361 if (!alloc)
362 return NULL;
363 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
364 *lp = pd;
365 for (i = 0; i < L2_SIZE; i++)
366 pd[i].phys_offset = IO_MEM_UNASSIGNED;
368 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
371 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
373 return phys_page_find_alloc(index, 0);
376 #if !defined(CONFIG_USER_ONLY)
377 static void tlb_protect_code(ram_addr_t ram_addr);
378 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
379 target_ulong vaddr);
380 #define mmap_lock() do { } while(0)
381 #define mmap_unlock() do { } while(0)
382 #endif
384 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
386 #if defined(CONFIG_USER_ONLY)
387 /* Currently it is not recommended to allocate big chunks of data in
388 user mode. This will change when a dedicated libc is used. */
389 #define USE_STATIC_CODE_GEN_BUFFER
390 #endif
392 #ifdef USE_STATIC_CODE_GEN_BUFFER
393 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
394 #endif
396 static void code_gen_alloc(unsigned long tb_size)
398 #ifdef USE_STATIC_CODE_GEN_BUFFER
399 code_gen_buffer = static_code_gen_buffer;
400 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
401 map_exec(code_gen_buffer, code_gen_buffer_size);
402 #else
403 code_gen_buffer_size = tb_size;
404 if (code_gen_buffer_size == 0) {
405 #if defined(CONFIG_USER_ONLY)
406 /* in user mode, phys_ram_size is not meaningful */
407 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
408 #else
409 /* XXX: needs adjustments */
410 code_gen_buffer_size = (int)(phys_ram_size / 4);
411 #endif
413 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
414 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
415 /* The code gen buffer location may have constraints depending on
416 the host cpu and OS */
417 #if defined(__linux__)
419 int flags;
420 void *start = NULL;
422 flags = MAP_PRIVATE | MAP_ANONYMOUS;
423 #if defined(__x86_64__)
424 flags |= MAP_32BIT;
425 /* Cannot map more than that */
426 if (code_gen_buffer_size > (800 * 1024 * 1024))
427 code_gen_buffer_size = (800 * 1024 * 1024);
428 #elif defined(__sparc_v9__)
429 // Map the buffer below 2G, so we can use direct calls and branches
430 flags |= MAP_FIXED;
431 start = (void *) 0x60000000UL;
432 if (code_gen_buffer_size > (512 * 1024 * 1024))
433 code_gen_buffer_size = (512 * 1024 * 1024);
434 #endif
435 code_gen_buffer = mmap(start, code_gen_buffer_size,
436 PROT_WRITE | PROT_READ | PROT_EXEC,
437 flags, -1, 0);
438 if (code_gen_buffer == MAP_FAILED) {
439 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
440 exit(1);
443 #else
444 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
445 if (!code_gen_buffer) {
446 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
447 exit(1);
449 map_exec(code_gen_buffer, code_gen_buffer_size);
450 #endif
451 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
452 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
453 code_gen_buffer_max_size = code_gen_buffer_size -
454 code_gen_max_block_size();
455 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
456 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
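/* NOTE: code_gen_buffer_max_size leaves room for one maximally sized TB at
   the end of the buffer: tb_alloc() refuses to start a new TB once
   code_gen_ptr has passed this threshold, so a single translation can never
   overrun the buffer and the caller falls back to tb_flush() instead. */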
459 /* Must be called before using the QEMU cpus. 'tb_size' is the size
460 (in bytes) allocated to the translation buffer. Zero means default
461 size. */
462 void cpu_exec_init_all(unsigned long tb_size)
464 cpu_gen_init();
465 code_gen_alloc(tb_size);
466 code_gen_ptr = code_gen_buffer;
467 page_init();
468 #if !defined(CONFIG_USER_ONLY)
469 io_mem_init();
470 #endif
473 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
475 #define CPU_COMMON_SAVE_VERSION 1
477 static void cpu_common_save(QEMUFile *f, void *opaque)
479 CPUState *env = opaque;
481 qemu_put_be32s(f, &env->halted);
482 qemu_put_be32s(f, &env->interrupt_request);
485 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
487 CPUState *env = opaque;
489 if (version_id != CPU_COMMON_SAVE_VERSION)
490 return -EINVAL;
492 qemu_get_be32s(f, &env->halted);
493 qemu_get_be32s(f, &env->interrupt_request);
494 tlb_flush(env, 1);
496 return 0;
498 #endif
500 void cpu_exec_init(CPUState *env)
502 CPUState **penv;
503 int cpu_index;
505 env->next_cpu = NULL;
506 penv = &first_cpu;
507 cpu_index = 0;
508 while (*penv != NULL) {
509 penv = (CPUState **)&(*penv)->next_cpu;
510 cpu_index++;
512 env->cpu_index = cpu_index;
513 env->nb_watchpoints = 0;
514 #ifdef __WIN32
515 env->thread_id = GetCurrentProcessId();
516 #else
517 env->thread_id = getpid();
518 #endif
519 *penv = env;
520 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
521 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
522 cpu_common_save, cpu_common_load, env);
523 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
524 cpu_save, cpu_load, env);
525 #endif
528 static inline void invalidate_page_bitmap(PageDesc *p)
530 if (p->code_bitmap) {
531 qemu_free(p->code_bitmap);
532 p->code_bitmap = NULL;
534 p->code_write_count = 0;
537 /* set to NULL all the 'first_tb' fields in all PageDescs */
538 static void page_flush_tb(void)
540 int i, j;
541 PageDesc *p;
543 for(i = 0; i < L1_SIZE; i++) {
544 p = l1_map[i];
545 if (p) {
546 for(j = 0; j < L2_SIZE; j++) {
547 p->first_tb = NULL;
548 invalidate_page_bitmap(p);
549 p++;
555 /* flush all the translation blocks */
556 /* XXX: tb_flush is currently not thread safe */
557 void tb_flush(CPUState *env1)
559 CPUState *env;
560 #if defined(DEBUG_FLUSH)
561 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
562 (unsigned long)(code_gen_ptr - code_gen_buffer),
563 nb_tbs, nb_tbs > 0 ?
564 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
565 #endif
566 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
567 cpu_abort(env1, "Internal error: code buffer overflow\n");
569 nb_tbs = 0;
571 for(env = first_cpu; env != NULL; env = env->next_cpu) {
572 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
575 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
576 page_flush_tb();
578 code_gen_ptr = code_gen_buffer;
579 /* XXX: flush processor icache at this point if cache flush is
580 expensive */
581 tb_flush_count++;
584 #ifdef DEBUG_TB_CHECK
586 static void tb_invalidate_check(target_ulong address)
588 TranslationBlock *tb;
589 int i;
590 address &= TARGET_PAGE_MASK;
591 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
592 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
593 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
594 address >= tb->pc + tb->size)) {
595 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
596 address, (long)tb->pc, tb->size);
602 /* verify that all the pages have correct rights for code */
603 static void tb_page_check(void)
605 TranslationBlock *tb;
606 int i, flags1, flags2;
608 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
609 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
610 flags1 = page_get_flags(tb->pc);
611 flags2 = page_get_flags(tb->pc + tb->size - 1);
612 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
613 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
614 (long)tb->pc, tb->size, flags1, flags2);
620 void tb_jmp_check(TranslationBlock *tb)
622 TranslationBlock *tb1;
623 unsigned int n1;
625 /* suppress any remaining jumps to this TB */
626 tb1 = tb->jmp_first;
627 for(;;) {
628 n1 = (long)tb1 & 3;
629 tb1 = (TranslationBlock *)((long)tb1 & ~3);
630 if (n1 == 2)
631 break;
632 tb1 = tb1->jmp_next[n1];
634 /* check end of list */
635 if (tb1 != tb) {
636 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
640 #endif
642 /* invalidate one TB */
643 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
644 int next_offset)
646 TranslationBlock *tb1;
647 for(;;) {
648 tb1 = *ptb;
649 if (tb1 == tb) {
650 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
651 break;
653 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
657 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
659 TranslationBlock *tb1;
660 unsigned int n1;
662 for(;;) {
663 tb1 = *ptb;
664 n1 = (long)tb1 & 3;
665 tb1 = (TranslationBlock *)((long)tb1 & ~3);
666 if (tb1 == tb) {
667 *ptb = tb1->page_next[n1];
668 break;
670 ptb = &tb1->page_next[n1];
674 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
676 TranslationBlock *tb1, **ptb;
677 unsigned int n1;
679 ptb = &tb->jmp_next[n];
680 tb1 = *ptb;
681 if (tb1) {
682 /* find tb(n) in circular list */
683 for(;;) {
684 tb1 = *ptb;
685 n1 = (long)tb1 & 3;
686 tb1 = (TranslationBlock *)((long)tb1 & ~3);
687 if (n1 == n && tb1 == tb)
688 break;
689 if (n1 == 2) {
690 ptb = &tb1->jmp_first;
691 } else {
692 ptb = &tb1->jmp_next[n1];
695 /* now we can suppress tb(n) from the list */
696 *ptb = tb->jmp_next[n];
698 tb->jmp_next[n] = NULL;
702 /* reset the jump entry 'n' of a TB so that it is not chained to
703 another TB */
704 static inline void tb_reset_jump(TranslationBlock *tb, int n)
706 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
709 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
711 CPUState *env;
712 PageDesc *p;
713 unsigned int h, n1;
714 target_phys_addr_t phys_pc;
715 TranslationBlock *tb1, *tb2;
717 /* remove the TB from the hash list */
718 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
719 h = tb_phys_hash_func(phys_pc);
720 tb_remove(&tb_phys_hash[h], tb,
721 offsetof(TranslationBlock, phys_hash_next));
723 /* remove the TB from the page list */
724 if (tb->page_addr[0] != page_addr) {
725 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
726 tb_page_remove(&p->first_tb, tb);
727 invalidate_page_bitmap(p);
729 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
730 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
731 tb_page_remove(&p->first_tb, tb);
732 invalidate_page_bitmap(p);
735 tb_invalidated_flag = 1;
737 /* remove the TB from the hash list */
738 h = tb_jmp_cache_hash_func(tb->pc);
739 for(env = first_cpu; env != NULL; env = env->next_cpu) {
740 if (env->tb_jmp_cache[h] == tb)
741 env->tb_jmp_cache[h] = NULL;
744 /* suppress this TB from the two jump lists */
745 tb_jmp_remove(tb, 0);
746 tb_jmp_remove(tb, 1);
748 /* suppress any remaining jumps to this TB */
749 tb1 = tb->jmp_first;
750 for(;;) {
751 n1 = (long)tb1 & 3;
752 if (n1 == 2)
753 break;
754 tb1 = (TranslationBlock *)((long)tb1 & ~3);
755 tb2 = tb1->jmp_next[n1];
756 tb_reset_jump(tb1, n1);
757 tb1->jmp_next[n1] = NULL;
758 tb1 = tb2;
760 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
762 tb_phys_invalidate_count++;
765 static inline void set_bits(uint8_t *tab, int start, int len)
767 int end, mask, end1;
769 end = start + len;
770 tab += start >> 3;
771 mask = 0xff << (start & 7);
772 if ((start & ~7) == (end & ~7)) {
773 if (start < end) {
774 mask &= ~(0xff << (end & 7));
775 *tab |= mask;
777 } else {
778 *tab++ |= mask;
779 start = (start + 8) & ~7;
780 end1 = end & ~7;
781 while (start < end1) {
782 *tab++ = 0xff;
783 start += 8;
785 if (start < end) {
786 mask = ~(0xff << (end & 7));
787 *tab |= mask;
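/* NOTE: set_bits() marks bits [start, start + len) in a byte-addressed
   bitmap. build_page_bitmap() below uses it to record which bytes of a page
   are covered by translated code, so that tb_invalidate_phys_page_fast() can
   skip writes that do not touch any TB. */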
792 static void build_page_bitmap(PageDesc *p)
794 int n, tb_start, tb_end;
795 TranslationBlock *tb;
797 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
798 if (!p->code_bitmap)
799 return;
801 tb = p->first_tb;
802 while (tb != NULL) {
803 n = (long)tb & 3;
804 tb = (TranslationBlock *)((long)tb & ~3);
805 /* NOTE: this is subtle as a TB may span two physical pages */
806 if (n == 0) {
807 /* NOTE: tb_end may be after the end of the page, but
808 it is not a problem */
809 tb_start = tb->pc & ~TARGET_PAGE_MASK;
810 tb_end = tb_start + tb->size;
811 if (tb_end > TARGET_PAGE_SIZE)
812 tb_end = TARGET_PAGE_SIZE;
813 } else {
814 tb_start = 0;
815 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
817 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
818 tb = tb->page_next[n];
822 TranslationBlock *tb_gen_code(CPUState *env,
823 target_ulong pc, target_ulong cs_base,
824 int flags, int cflags)
826 TranslationBlock *tb;
827 uint8_t *tc_ptr;
828 target_ulong phys_pc, phys_page2, virt_page2;
829 int code_gen_size;
831 phys_pc = get_phys_addr_code(env, pc);
832 tb = tb_alloc(pc);
833 if (!tb) {
834 /* flush must be done */
835 tb_flush(env);
836 /* cannot fail at this point */
837 tb = tb_alloc(pc);
838 /* Don't forget to invalidate previous TB info. */
839 tb_invalidated_flag = 1;
841 tc_ptr = code_gen_ptr;
842 tb->tc_ptr = tc_ptr;
843 tb->cs_base = cs_base;
844 tb->flags = flags;
845 tb->cflags = cflags;
846 cpu_gen_code(env, tb, &code_gen_size);
847 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
849 /* check next page if needed */
850 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
851 phys_page2 = -1;
852 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
853 phys_page2 = get_phys_addr_code(env, virt_page2);
855 tb_link_phys(tb, phys_pc, phys_page2);
856 return tb;
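/* NOTE: a TB whose code crosses a page boundary records both physical pages
   (phys_pc and phys_page2); tb_link_phys() chains it onto both PageDescs, so
   a write to either page invalidates it. */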
859 /* invalidate all TBs which intersect with the target physical page
860 starting in the range [start, end). NOTE: start and end must refer to
861 the same physical page. 'is_cpu_write_access' should be true if called
862 from a real cpu write access: the virtual CPU will exit the current
863 TB if code is modified inside this TB. */
864 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
865 int is_cpu_write_access)
867 int n, current_tb_modified, current_tb_not_found, current_flags;
868 CPUState *env = cpu_single_env;
869 PageDesc *p;
870 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
871 target_ulong tb_start, tb_end;
872 target_ulong current_pc, current_cs_base;
874 p = page_find(start >> TARGET_PAGE_BITS);
875 if (!p)
876 return;
877 if (!p->code_bitmap &&
878 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
879 is_cpu_write_access) {
880 /* build code bitmap */
881 build_page_bitmap(p);
884 /* we remove all the TBs in the range [start, end) */
885 /* XXX: see if in some cases it could be faster to invalidate all the code */
886 current_tb_not_found = is_cpu_write_access;
887 current_tb_modified = 0;
888 current_tb = NULL; /* avoid warning */
889 current_pc = 0; /* avoid warning */
890 current_cs_base = 0; /* avoid warning */
891 current_flags = 0; /* avoid warning */
892 tb = p->first_tb;
893 while (tb != NULL) {
894 n = (long)tb & 3;
895 tb = (TranslationBlock *)((long)tb & ~3);
896 tb_next = tb->page_next[n];
897 /* NOTE: this is subtle as a TB may span two physical pages */
898 if (n == 0) {
899 /* NOTE: tb_end may be after the end of the page, but
900 it is not a problem */
901 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
902 tb_end = tb_start + tb->size;
903 } else {
904 tb_start = tb->page_addr[1];
905 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
907 if (!(tb_end <= start || tb_start >= end)) {
908 #ifdef TARGET_HAS_PRECISE_SMC
909 if (current_tb_not_found) {
910 current_tb_not_found = 0;
911 current_tb = NULL;
912 if (env->mem_io_pc) {
913 /* now we have a real cpu fault */
914 current_tb = tb_find_pc(env->mem_io_pc);
917 if (current_tb == tb &&
918 (current_tb->cflags & CF_COUNT_MASK) != 1) {
919 /* If we are modifying the current TB, we must stop
920 its execution. We could be more precise by checking
921 that the modification is after the current PC, but it
922 would require a specialized function to partially
923 restore the CPU state */
925 current_tb_modified = 1;
926 cpu_restore_state(current_tb, env,
927 env->mem_io_pc, NULL);
928 #if defined(TARGET_I386)
929 current_flags = env->hflags;
930 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
931 current_cs_base = (target_ulong)env->segs[R_CS].base;
932 current_pc = current_cs_base + env->eip;
933 #else
934 #error unsupported CPU
935 #endif
937 #endif /* TARGET_HAS_PRECISE_SMC */
938 /* we need to do that to handle the case where a signal
939 occurs while doing tb_phys_invalidate() */
940 saved_tb = NULL;
941 if (env) {
942 saved_tb = env->current_tb;
943 env->current_tb = NULL;
945 tb_phys_invalidate(tb, -1);
946 if (env) {
947 env->current_tb = saved_tb;
948 if (env->interrupt_request && env->current_tb)
949 cpu_interrupt(env, env->interrupt_request);
952 tb = tb_next;
954 #if !defined(CONFIG_USER_ONLY)
955 /* if no code remaining, no need to continue to use slow writes */
956 if (!p->first_tb) {
957 invalidate_page_bitmap(p);
958 if (is_cpu_write_access) {
959 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
962 #endif
963 #ifdef TARGET_HAS_PRECISE_SMC
964 if (current_tb_modified) {
965 /* we generate a block containing just the instruction
966 modifying the memory. It will ensure that it cannot modify
967 itself */
968 env->current_tb = NULL;
969 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
970 cpu_resume_from_signal(env, NULL);
972 #endif
975 /* len must be <= 8 and start must be a multiple of len */
976 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
978 PageDesc *p;
979 int offset, b;
980 #if 0
981 if (1) {
982 if (loglevel) {
983 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
984 cpu_single_env->mem_io_vaddr, len,
985 cpu_single_env->eip,
986 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
989 #endif
990 p = page_find(start >> TARGET_PAGE_BITS);
991 if (!p)
992 return;
993 if (p->code_bitmap) {
994 offset = start & ~TARGET_PAGE_MASK;
995 b = p->code_bitmap[offset >> 3] >> (offset & 7);
996 if (b & ((1 << len) - 1))
997 goto do_invalidate;
998 } else {
999 do_invalidate:
1000 tb_invalidate_phys_page_range(start, start + len, 1);
1004 #if !defined(CONFIG_SOFTMMU)
1005 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1006 unsigned long pc, void *puc)
1008 int n, current_flags, current_tb_modified;
1009 target_ulong current_pc, current_cs_base;
1010 PageDesc *p;
1011 TranslationBlock *tb, *current_tb;
1012 #ifdef TARGET_HAS_PRECISE_SMC
1013 CPUState *env = cpu_single_env;
1014 #endif
1016 addr &= TARGET_PAGE_MASK;
1017 p = page_find(addr >> TARGET_PAGE_BITS);
1018 if (!p)
1019 return;
1020 tb = p->first_tb;
1021 current_tb_modified = 0;
1022 current_tb = NULL;
1023 current_pc = 0; /* avoid warning */
1024 current_cs_base = 0; /* avoid warning */
1025 current_flags = 0; /* avoid warning */
1026 #ifdef TARGET_HAS_PRECISE_SMC
1027 if (tb && pc != 0) {
1028 current_tb = tb_find_pc(pc);
1030 #endif
1031 while (tb != NULL) {
1032 n = (long)tb & 3;
1033 tb = (TranslationBlock *)((long)tb & ~3);
1034 #ifdef TARGET_HAS_PRECISE_SMC
1035 if (current_tb == tb &&
1036 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1037 /* If we are modifying the current TB, we must stop
1038 its execution. We could be more precise by checking
1039 that the modification is after the current PC, but it
1040 would require a specialized function to partially
1041 restore the CPU state */
1043 current_tb_modified = 1;
1044 cpu_restore_state(current_tb, env, pc, puc);
1045 #if defined(TARGET_I386)
1046 current_flags = env->hflags;
1047 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1048 current_cs_base = (target_ulong)env->segs[R_CS].base;
1049 current_pc = current_cs_base + env->eip;
1050 #else
1051 #error unsupported CPU
1052 #endif
1054 #endif /* TARGET_HAS_PRECISE_SMC */
1055 tb_phys_invalidate(tb, addr);
1056 tb = tb->page_next[n];
1058 p->first_tb = NULL;
1059 #ifdef TARGET_HAS_PRECISE_SMC
1060 if (current_tb_modified) {
1061 /* we generate a block containing just the instruction
1062 modifying the memory. It will ensure that it cannot modify
1063 itself */
1064 env->current_tb = NULL;
1065 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1066 cpu_resume_from_signal(env, puc);
1068 #endif
1070 #endif
1072 /* add the tb in the target page and protect it if necessary */
1073 static inline void tb_alloc_page(TranslationBlock *tb,
1074 unsigned int n, target_ulong page_addr)
1076 PageDesc *p;
1077 TranslationBlock *last_first_tb;
1079 tb->page_addr[n] = page_addr;
1080 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1081 tb->page_next[n] = p->first_tb;
1082 last_first_tb = p->first_tb;
1083 p->first_tb = (TranslationBlock *)((long)tb | n);
1084 invalidate_page_bitmap(p);
1086 #if defined(TARGET_HAS_SMC) || 1
1088 #if defined(CONFIG_USER_ONLY)
1089 if (p->flags & PAGE_WRITE) {
1090 target_ulong addr;
1091 PageDesc *p2;
1092 int prot;
1094 /* force the host page as non writable (writes will have a
1095 page fault + mprotect overhead) */
1096 page_addr &= qemu_host_page_mask;
1097 prot = 0;
1098 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1099 addr += TARGET_PAGE_SIZE) {
1101 p2 = page_find (addr >> TARGET_PAGE_BITS);
1102 if (!p2)
1103 continue;
1104 prot |= p2->flags;
1105 p2->flags &= ~PAGE_WRITE;
1106 page_get_flags(addr);
1108 mprotect(g2h(page_addr), qemu_host_page_size,
1109 (prot & PAGE_BITS) & ~PAGE_WRITE);
1110 #ifdef DEBUG_TB_INVALIDATE
1111 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1112 page_addr);
1113 #endif
1115 #else
1116 /* if some code is already present, then the pages are already
1117 protected. So we handle the case where only the first TB is
1118 allocated in a physical page */
1119 if (!last_first_tb) {
1120 tlb_protect_code(page_addr);
1122 #endif
1124 #endif /* TARGET_HAS_SMC */
1127 /* Allocate a new translation block. Flush the translation buffer if
1128 too many translation blocks or too much generated code. */
1129 TranslationBlock *tb_alloc(target_ulong pc)
1131 TranslationBlock *tb;
1133 if (nb_tbs >= code_gen_max_blocks ||
1134 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1135 return NULL;
1136 tb = &tbs[nb_tbs++];
1137 tb->pc = pc;
1138 tb->cflags = 0;
1139 return tb;
1142 void tb_free(TranslationBlock *tb)
1144 /* In practice this is mostly used for single-use temporary TBs.
1145 Ignore the hard cases and just back up if this TB happens to
1146 be the last one generated. */
1147 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1148 code_gen_ptr = tb->tc_ptr;
1149 nb_tbs--;
1153 /* add a new TB and link it to the physical page tables. phys_page2 is
1154 (-1) to indicate that only one page contains the TB. */
1155 void tb_link_phys(TranslationBlock *tb,
1156 target_ulong phys_pc, target_ulong phys_page2)
1158 unsigned int h;
1159 TranslationBlock **ptb;
1161 /* Grab the mmap lock to stop another thread invalidating this TB
1162 before we are done. */
1163 mmap_lock();
1164 /* add in the physical hash table */
1165 h = tb_phys_hash_func(phys_pc);
1166 ptb = &tb_phys_hash[h];
1167 tb->phys_hash_next = *ptb;
1168 *ptb = tb;
1170 /* add in the page list */
1171 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1172 if (phys_page2 != -1)
1173 tb_alloc_page(tb, 1, phys_page2);
1174 else
1175 tb->page_addr[1] = -1;
1177 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1178 tb->jmp_next[0] = NULL;
1179 tb->jmp_next[1] = NULL;
1181 /* init original jump addresses */
1182 if (tb->tb_next_offset[0] != 0xffff)
1183 tb_reset_jump(tb, 0);
1184 if (tb->tb_next_offset[1] != 0xffff)
1185 tb_reset_jump(tb, 1);
1187 #ifdef DEBUG_TB_CHECK
1188 tb_page_check();
1189 #endif
1190 mmap_unlock();
1193 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1194 tb[1].tc_ptr. Return NULL if not found */
1195 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1197 int m_min, m_max, m;
1198 unsigned long v;
1199 TranslationBlock *tb;
1201 if (nb_tbs <= 0)
1202 return NULL;
1203 if (tc_ptr < (unsigned long)code_gen_buffer ||
1204 tc_ptr >= (unsigned long)code_gen_ptr)
1205 return NULL;
1206 /* binary search (cf Knuth) */
1207 m_min = 0;
1208 m_max = nb_tbs - 1;
1209 while (m_min <= m_max) {
1210 m = (m_min + m_max) >> 1;
1211 tb = &tbs[m];
1212 v = (unsigned long)tb->tc_ptr;
1213 if (v == tc_ptr)
1214 return tb;
1215 else if (tc_ptr < v) {
1216 m_max = m - 1;
1217 } else {
1218 m_min = m + 1;
1221 return &tbs[m_max];
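/* NOTE: the binary search above is valid because TBs are handed out from
   tbs[] in the same order their code is emitted into code_gen_buffer, so
   tc_ptr grows monotonically with the array index. */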
1224 static void tb_reset_jump_recursive(TranslationBlock *tb);
1226 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1228 TranslationBlock *tb1, *tb_next, **ptb;
1229 unsigned int n1;
1231 tb1 = tb->jmp_next[n];
1232 if (tb1 != NULL) {
1233 /* find head of list */
1234 for(;;) {
1235 n1 = (long)tb1 & 3;
1236 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1237 if (n1 == 2)
1238 break;
1239 tb1 = tb1->jmp_next[n1];
1241 /* we are now sure that tb jumps to tb1 */
1242 tb_next = tb1;
1244 /* remove tb from the jmp_first list */
1245 ptb = &tb_next->jmp_first;
1246 for(;;) {
1247 tb1 = *ptb;
1248 n1 = (long)tb1 & 3;
1249 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1250 if (n1 == n && tb1 == tb)
1251 break;
1252 ptb = &tb1->jmp_next[n1];
1254 *ptb = tb->jmp_next[n];
1255 tb->jmp_next[n] = NULL;
1257 /* suppress the jump to next tb in generated code */
1258 tb_reset_jump(tb, n);
1260 /* suppress jumps in the tb on which we could have jumped */
1261 tb_reset_jump_recursive(tb_next);
1265 static void tb_reset_jump_recursive(TranslationBlock *tb)
1267 tb_reset_jump_recursive2(tb, 0);
1268 tb_reset_jump_recursive2(tb, 1);
1271 #if defined(TARGET_HAS_ICE)
1272 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1274 target_phys_addr_t addr;
1275 target_ulong pd;
1276 ram_addr_t ram_addr;
1277 PhysPageDesc *p;
1279 addr = cpu_get_phys_page_debug(env, pc);
1280 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1281 if (!p) {
1282 pd = IO_MEM_UNASSIGNED;
1283 } else {
1284 pd = p->phys_offset;
1286 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1287 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1289 #endif
1291 /* Add a watchpoint. */
1292 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1294 int i;
1296 for (i = 0; i < env->nb_watchpoints; i++) {
1297 if (addr == env->watchpoint[i].vaddr)
1298 return 0;
1300 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1301 return -1;
1303 i = env->nb_watchpoints++;
1304 env->watchpoint[i].vaddr = addr;
1305 env->watchpoint[i].type = type;
1306 tlb_flush_page(env, addr);
1307 /* FIXME: This flush is needed because of the hack to make memory ops
1308 terminate the TB. It can be removed once the proper IO trap and
1309 re-execute bits are in. */
1310 tb_flush(env);
1311 return i;
1314 /* Remove a watchpoint. */
1315 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1317 int i;
1319 for (i = 0; i < env->nb_watchpoints; i++) {
1320 if (addr == env->watchpoint[i].vaddr) {
1321 env->nb_watchpoints--;
1322 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1323 tlb_flush_page(env, addr);
1324 return 0;
1327 return -1;
1330 /* Remove all watchpoints. */
1331 void cpu_watchpoint_remove_all(CPUState *env) {
1332 int i;
1334 for (i = 0; i < env->nb_watchpoints; i++) {
1335 tlb_flush_page(env, env->watchpoint[i].vaddr);
1337 env->nb_watchpoints = 0;
1340 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1341 breakpoint is reached */
1342 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1344 #if defined(TARGET_HAS_ICE)
1345 int i;
1347 for(i = 0; i < env->nb_breakpoints; i++) {
1348 if (env->breakpoints[i] == pc)
1349 return 0;
1352 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1353 return -1;
1354 env->breakpoints[env->nb_breakpoints++] = pc;
1356 if (kvm_enabled())
1357 kvm_update_debugger(env);
1359 breakpoint_invalidate(env, pc);
1360 return 0;
1361 #else
1362 return -1;
1363 #endif
1366 /* remove all breakpoints */
1367 void cpu_breakpoint_remove_all(CPUState *env) {
1368 #if defined(TARGET_HAS_ICE)
1369 int i;
1370 for(i = 0; i < env->nb_breakpoints; i++) {
1371 breakpoint_invalidate(env, env->breakpoints[i]);
1373 env->nb_breakpoints = 0;
1374 #endif
1377 /* remove a breakpoint */
1378 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1380 #if defined(TARGET_HAS_ICE)
1381 int i;
1382 for(i = 0; i < env->nb_breakpoints; i++) {
1383 if (env->breakpoints[i] == pc)
1384 goto found;
1386 return -1;
1387 found:
1388 env->nb_breakpoints--;
1389 if (i < env->nb_breakpoints)
1390 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1392 if (kvm_enabled())
1393 kvm_update_debugger(env);
1395 breakpoint_invalidate(env, pc);
1396 return 0;
1397 #else
1398 return -1;
1399 #endif
1402 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1403 CPU loop after each instruction */
1404 void cpu_single_step(CPUState *env, int enabled)
1406 #if defined(TARGET_HAS_ICE)
1407 if (env->singlestep_enabled != enabled) {
1408 env->singlestep_enabled = enabled;
1409 /* must flush all the translated code to avoid inconsistencies */
1410 /* XXX: only flush what is necessary */
1411 tb_flush(env);
1413 if (kvm_enabled())
1414 kvm_update_debugger(env);
1415 #endif
1418 /* enable or disable low levels log */
1419 void cpu_set_log(int log_flags)
1421 loglevel = log_flags;
1422 if (loglevel && !logfile) {
1423 logfile = fopen(logfilename, log_append ? "a" : "w");
1424 if (!logfile) {
1425 perror(logfilename);
1426 _exit(1);
1428 #if !defined(CONFIG_SOFTMMU)
1429 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1431 static uint8_t logfile_buf[4096];
1432 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1434 #else
1435 setvbuf(logfile, NULL, _IOLBF, 0);
1436 #endif
1437 log_append = 1;
1439 if (!loglevel && logfile) {
1440 fclose(logfile);
1441 logfile = NULL;
1445 void cpu_set_log_filename(const char *filename)
1447 logfilename = strdup(filename);
1448 if (logfile) {
1449 fclose(logfile);
1450 logfile = NULL;
1452 cpu_set_log(loglevel);
1455 /* mask must never be zero, except for A20 change call */
1456 void cpu_interrupt(CPUState *env, int mask)
1458 #if !defined(USE_NPTL)
1459 TranslationBlock *tb;
1460 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1461 #endif
1462 int old_mask;
1464 old_mask = env->interrupt_request;
1465 /* FIXME: This is probably not threadsafe. A different thread could
1466 be in the middle of a read-modify-write operation. */
1467 env->interrupt_request |= mask;
1468 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1469 kvm_update_interrupt_request(env);
1470 #if defined(USE_NPTL)
1471 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1472 problem and hope the cpu will stop of its own accord. For userspace
1473 emulation this often isn't actually as bad as it sounds. Often
1474 signals are used primarily to interrupt blocking syscalls. */
1475 #else
1476 if (use_icount) {
1477 env->icount_decr.u16.high = 0xffff;
1478 #ifndef CONFIG_USER_ONLY
1479 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1480 an async event happened and we need to process it. */
1481 if (!can_do_io(env)
1482 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1483 cpu_abort(env, "Raised interrupt while not in I/O function");
1485 #endif
1486 } else {
1487 tb = env->current_tb;
1488 /* if the cpu is currently executing code, we must unlink it and
1489 all the potentially executing TB */
1490 if (tb && !testandset(&interrupt_lock)) {
1491 env->current_tb = NULL;
1492 tb_reset_jump_recursive(tb);
1493 resetlock(&interrupt_lock);
1496 #endif
1499 void cpu_reset_interrupt(CPUState *env, int mask)
1501 env->interrupt_request &= ~mask;
1504 CPULogItem cpu_log_items[] = {
1505 { CPU_LOG_TB_OUT_ASM, "out_asm",
1506 "show generated host assembly code for each compiled TB" },
1507 { CPU_LOG_TB_IN_ASM, "in_asm",
1508 "show target assembly code for each compiled TB" },
1509 { CPU_LOG_TB_OP, "op",
1510 "show micro ops for each compiled TB" },
1511 { CPU_LOG_TB_OP_OPT, "op_opt",
1512 "show micro ops "
1513 #ifdef TARGET_I386
1514 "before eflags optimization and "
1515 #endif
1516 "after liveness analysis" },
1517 { CPU_LOG_INT, "int",
1518 "show interrupts/exceptions in short format" },
1519 { CPU_LOG_EXEC, "exec",
1520 "show trace before each executed TB (lots of logs)" },
1521 { CPU_LOG_TB_CPU, "cpu",
1522 "show CPU state before block translation" },
1523 #ifdef TARGET_I386
1524 { CPU_LOG_PCALL, "pcall",
1525 "show protected mode far calls/returns/exceptions" },
1526 #endif
1527 #ifdef DEBUG_IOPORT
1528 { CPU_LOG_IOPORT, "ioport",
1529 "show all i/o ports accesses" },
1530 #endif
1531 { 0, NULL, NULL },
1534 static int cmp1(const char *s1, int n, const char *s2)
1536 if (strlen(s2) != n)
1537 return 0;
1538 return memcmp(s1, s2, n) == 0;
1541 /* takes a comma separated list of log masks. Return 0 if error. */
1542 int cpu_str_to_log_mask(const char *str)
1544 CPULogItem *item;
1545 int mask;
1546 const char *p, *p1;
1548 p = str;
1549 mask = 0;
1550 for(;;) {
1551 p1 = strchr(p, ',');
1552 if (!p1)
1553 p1 = p + strlen(p);
1554 if(cmp1(p,p1-p,"all")) {
1555 for(item = cpu_log_items; item->mask != 0; item++) {
1556 mask |= item->mask;
1558 } else {
1559 for(item = cpu_log_items; item->mask != 0; item++) {
1560 if (cmp1(p, p1 - p, item->name))
1561 goto found;
1563 return 0;
1565 found:
1566 mask |= item->mask;
1567 if (*p1 != ',')
1568 break;
1569 p = p1 + 1;
1571 return mask;
1574 void cpu_abort(CPUState *env, const char *fmt, ...)
1576 va_list ap;
1577 va_list ap2;
1579 va_start(ap, fmt);
1580 va_copy(ap2, ap);
1581 fprintf(stderr, "qemu: fatal: ");
1582 vfprintf(stderr, fmt, ap);
1583 fprintf(stderr, "\n");
1584 #ifdef TARGET_I386
1585 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1586 #else
1587 cpu_dump_state(env, stderr, fprintf, 0);
1588 #endif
1589 if (logfile) {
1590 fprintf(logfile, "qemu: fatal: ");
1591 vfprintf(logfile, fmt, ap2);
1592 fprintf(logfile, "\n");
1593 #ifdef TARGET_I386
1594 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1595 #else
1596 cpu_dump_state(env, logfile, fprintf, 0);
1597 #endif
1598 fflush(logfile);
1599 fclose(logfile);
1601 va_end(ap2);
1602 va_end(ap);
1603 abort();
1606 CPUState *cpu_copy(CPUState *env)
1608 CPUState *new_env = cpu_init(env->cpu_model_str);
1609 /* preserve chaining and index */
1610 CPUState *next_cpu = new_env->next_cpu;
1611 int cpu_index = new_env->cpu_index;
1612 memcpy(new_env, env, sizeof(CPUState));
1613 new_env->next_cpu = next_cpu;
1614 new_env->cpu_index = cpu_index;
1615 return new_env;
1618 #if !defined(CONFIG_USER_ONLY)
1620 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1622 unsigned int i;
1624 /* Discard jump cache entries for any tb which might potentially
1625 overlap the flushed page. */
1626 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1627 memset (&env->tb_jmp_cache[i], 0,
1628 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1630 i = tb_jmp_cache_hash_page(addr);
1631 memset (&env->tb_jmp_cache[i], 0,
1632 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1635 /* NOTE: if flush_global is true, also flush global entries (not
1636 implemented yet) */
1637 void tlb_flush(CPUState *env, int flush_global)
1639 int i;
1641 #if defined(DEBUG_TLB)
1642 printf("tlb_flush:\n");
1643 #endif
1644 /* must reset current TB so that interrupts cannot modify the
1645 links while we are modifying them */
1646 env->current_tb = NULL;
1648 for(i = 0; i < CPU_TLB_SIZE; i++) {
1649 env->tlb_table[0][i].addr_read = -1;
1650 env->tlb_table[0][i].addr_write = -1;
1651 env->tlb_table[0][i].addr_code = -1;
1652 env->tlb_table[1][i].addr_read = -1;
1653 env->tlb_table[1][i].addr_write = -1;
1654 env->tlb_table[1][i].addr_code = -1;
1655 #if (NB_MMU_MODES >= 3)
1656 env->tlb_table[2][i].addr_read = -1;
1657 env->tlb_table[2][i].addr_write = -1;
1658 env->tlb_table[2][i].addr_code = -1;
1659 #if (NB_MMU_MODES == 4)
1660 env->tlb_table[3][i].addr_read = -1;
1661 env->tlb_table[3][i].addr_write = -1;
1662 env->tlb_table[3][i].addr_code = -1;
1663 #endif
1664 #endif
1667 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1669 #ifdef USE_KQEMU
1670 if (env->kqemu_enabled) {
1671 kqemu_flush(env, flush_global);
1673 #endif
1674 tlb_flush_count++;
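/* NOTE: an entry is invalidated by storing -1 in addr_read/addr_write/
   addr_code: that value can never compare equal to a page-aligned guest
   address in the fast path, so the access always takes the slow path and the
   entry is refilled via tlb_set_page_exec(). */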
1677 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1679 if (addr == (tlb_entry->addr_read &
1680 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1681 addr == (tlb_entry->addr_write &
1682 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1683 addr == (tlb_entry->addr_code &
1684 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1685 tlb_entry->addr_read = -1;
1686 tlb_entry->addr_write = -1;
1687 tlb_entry->addr_code = -1;
1691 void tlb_flush_page(CPUState *env, target_ulong addr)
1693 int i;
1695 #if defined(DEBUG_TLB)
1696 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1697 #endif
1698 /* must reset current TB so that interrupts cannot modify the
1699 links while we are modifying them */
1700 env->current_tb = NULL;
1702 addr &= TARGET_PAGE_MASK;
1703 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1704 tlb_flush_entry(&env->tlb_table[0][i], addr);
1705 tlb_flush_entry(&env->tlb_table[1][i], addr);
1706 #if (NB_MMU_MODES >= 3)
1707 tlb_flush_entry(&env->tlb_table[2][i], addr);
1708 #if (NB_MMU_MODES == 4)
1709 tlb_flush_entry(&env->tlb_table[3][i], addr);
1710 #endif
1711 #endif
1713 tlb_flush_jmp_cache(env, addr);
1715 #ifdef USE_KQEMU
1716 if (env->kqemu_enabled) {
1717 kqemu_flush_page(env, addr);
1719 #endif
1722 /* update the TLBs so that writes to code in the virtual page 'addr'
1723 can be detected */
1724 static void tlb_protect_code(ram_addr_t ram_addr)
1726 cpu_physical_memory_reset_dirty(ram_addr,
1727 ram_addr + TARGET_PAGE_SIZE,
1728 CODE_DIRTY_FLAG);
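/* NOTE: clearing CODE_DIRTY_FLAG here means subsequent writes to the page are
   routed through the not-dirty slow path (TLB_NOTDIRTY / IO_MEM_NOTDIRTY),
   which checks for and invalidates translated code on the page before
   performing the store, so self-modifying code is caught. */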
1731 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1732 tested for self modifying code */
1733 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1734 target_ulong vaddr)
1736 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1739 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1740 unsigned long start, unsigned long length)
1742 unsigned long addr;
1743 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1744 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1745 if ((addr - start) < length) {
1746 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1751 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1752 int dirty_flags)
1754 CPUState *env;
1755 unsigned long length, start1;
1756 int i, mask, len;
1757 uint8_t *p;
1759 start &= TARGET_PAGE_MASK;
1760 end = TARGET_PAGE_ALIGN(end);
1762 length = end - start;
1763 if (length == 0)
1764 return;
1765 len = length >> TARGET_PAGE_BITS;
1766 #ifdef USE_KQEMU
1767 /* XXX: should not depend on cpu context */
1768 env = first_cpu;
1769 if (env->kqemu_enabled) {
1770 ram_addr_t addr;
1771 addr = start;
1772 for(i = 0; i < len; i++) {
1773 kqemu_set_notdirty(env, addr);
1774 addr += TARGET_PAGE_SIZE;
1777 #endif
1778 mask = ~dirty_flags;
1779 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1780 for(i = 0; i < len; i++)
1781 p[i] &= mask;
1783 /* we modify the TLB cache so that the dirty bit will be set again
1784 when accessing the range */
1785 start1 = start + (unsigned long)phys_ram_base;
1786 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1787 for(i = 0; i < CPU_TLB_SIZE; i++)
1788 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1789 for(i = 0; i < CPU_TLB_SIZE; i++)
1790 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1791 #if (NB_MMU_MODES >= 3)
1792 for(i = 0; i < CPU_TLB_SIZE; i++)
1793 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1794 #if (NB_MMU_MODES == 4)
1795 for(i = 0; i < CPU_TLB_SIZE; i++)
1796 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1797 #endif
1798 #endif
1802 int cpu_physical_memory_set_dirty_tracking(int enable)
1804 int r=0;
1806 if (kvm_enabled())
1807 r = kvm_physical_memory_set_dirty_tracking(enable);
1808 in_migration = enable;
1809 return r;
1812 int cpu_physical_memory_get_dirty_tracking(void)
1814 return in_migration;
1817 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1819 ram_addr_t ram_addr;
1821 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1822 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1823 tlb_entry->addend - (unsigned long)phys_ram_base;
1824 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1825 tlb_entry->addr_write |= TLB_NOTDIRTY;
1830 /* update the TLB according to the current state of the dirty bits */
1831 void cpu_tlb_update_dirty(CPUState *env)
1833 int i;
1834 for(i = 0; i < CPU_TLB_SIZE; i++)
1835 tlb_update_dirty(&env->tlb_table[0][i]);
1836 for(i = 0; i < CPU_TLB_SIZE; i++)
1837 tlb_update_dirty(&env->tlb_table[1][i]);
1838 #if (NB_MMU_MODES >= 3)
1839 for(i = 0; i < CPU_TLB_SIZE; i++)
1840 tlb_update_dirty(&env->tlb_table[2][i]);
1841 #if (NB_MMU_MODES == 4)
1842 for(i = 0; i < CPU_TLB_SIZE; i++)
1843 tlb_update_dirty(&env->tlb_table[3][i]);
1844 #endif
1845 #endif
1848 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1850 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1851 tlb_entry->addr_write = vaddr;
1854 /* update the TLB corresponding to virtual page vaddr
1855 so that it is no longer dirty */
1856 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1858 int i;
1860 vaddr &= TARGET_PAGE_MASK;
1861 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1862 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1863 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1864 #if (NB_MMU_MODES >= 3)
1865 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1866 #if (NB_MMU_MODES == 4)
1867 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1868 #endif
1869 #endif
1872 /* add a new TLB entry. At most one entry for a given virtual address
1873 is permitted. Return 0 if OK or 2 if the page could not be mapped
1874 (can only happen in non SOFTMMU mode for I/O pages or pages
1875 conflicting with the host address space). */
1876 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1877 target_phys_addr_t paddr, int prot,
1878 int mmu_idx, int is_softmmu)
1880 PhysPageDesc *p;
1881 unsigned long pd;
1882 unsigned int index;
1883 target_ulong address;
1884 target_ulong code_address;
1885 target_phys_addr_t addend;
1886 int ret;
1887 CPUTLBEntry *te;
1888 int i;
1889 target_phys_addr_t iotlb;
1891 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1892 if (!p) {
1893 pd = IO_MEM_UNASSIGNED;
1894 } else {
1895 pd = p->phys_offset;
1897 #if defined(DEBUG_TLB)
1898 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1899 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1900 #endif
1902 ret = 0;
1903 address = vaddr;
1904 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1905 /* IO memory case (romd handled later) */
1906 address |= TLB_MMIO;
1908 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1909 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1910 /* Normal RAM. */
1911 iotlb = pd & TARGET_PAGE_MASK;
1912 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1913 iotlb |= IO_MEM_NOTDIRTY;
1914 else
1915 iotlb |= IO_MEM_ROM;
1916 } else {
1917 /* IO handlers are currently passed a physical address.
1918 It would be nice to pass an offset from the base address
1919 of that region. This would avoid having to special case RAM,
1920 and avoid full address decoding in every device.
1921 We can't use the high bits of pd for this because
1922 IO_MEM_ROMD uses these as a ram address. */
1923 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1926 code_address = address;
1927 /* Make accesses to pages with watchpoints go via the
1928 watchpoint trap routines. */
1929 for (i = 0; i < env->nb_watchpoints; i++) {
1930 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1931 iotlb = io_mem_watch + paddr;
1932 /* TODO: The memory case can be optimized by not trapping
1933 reads of pages with a write breakpoint. */
1934 address |= TLB_MMIO;
1938 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1939 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1940 te = &env->tlb_table[mmu_idx][index];
1941 te->addend = addend - vaddr;
1942 if (prot & PAGE_READ) {
1943 te->addr_read = address;
1944 } else {
1945 te->addr_read = -1;
1948 if (prot & PAGE_EXEC) {
1949 te->addr_code = code_address;
1950 } else {
1951 te->addr_code = -1;
1953 if (prot & PAGE_WRITE) {
1954 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1955 (pd & IO_MEM_ROMD)) {
1956 /* Write access calls the I/O callback. */
1957 te->addr_write = address | TLB_MMIO;
1958 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1959 !cpu_physical_memory_is_dirty(pd)) {
1960 te->addr_write = address | TLB_NOTDIRTY;
1961 } else {
1962 te->addr_write = address;
1964 } else {
1965 te->addr_write = -1;
1967 return ret;
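/* NOTE: each CPUTLBEntry carries three comparators (addr_read, addr_write,
   addr_code) so that read, write and instruction-fetch permissions can differ
   per page. The low bits hold flags such as TLB_MMIO (route the access
   through an I/O handler) and TLB_NOTDIRTY (take the slow path for dirty
   tracking and self-modifying-code detection), while addend converts a guest
   virtual address into a host pointer for the RAM fast path. */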
1970 #else
1972 void tlb_flush(CPUState *env, int flush_global)
1976 void tlb_flush_page(CPUState *env, target_ulong addr)
1980 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1981 target_phys_addr_t paddr, int prot,
1982 int mmu_idx, int is_softmmu)
1984 return 0;
1987 /* dump memory mappings */
1988 void page_dump(FILE *f)
1990 unsigned long start, end;
1991 int i, j, prot, prot1;
1992 PageDesc *p;
1994 fprintf(f, "%-8s %-8s %-8s %s\n",
1995 "start", "end", "size", "prot");
1996 start = -1;
1997 end = -1;
1998 prot = 0;
1999 for(i = 0; i <= L1_SIZE; i++) {
2000 if (i < L1_SIZE)
2001 p = l1_map[i];
2002 else
2003 p = NULL;
2004 for(j = 0;j < L2_SIZE; j++) {
2005 if (!p)
2006 prot1 = 0;
2007 else
2008 prot1 = p[j].flags;
2009 if (prot1 != prot) {
2010 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2011 if (start != -1) {
2012 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2013 start, end, end - start,
2014 prot & PAGE_READ ? 'r' : '-',
2015 prot & PAGE_WRITE ? 'w' : '-',
2016 prot & PAGE_EXEC ? 'x' : '-');
2018 if (prot1 != 0)
2019 start = end;
2020 else
2021 start = -1;
2022 prot = prot1;
2024 if (!p)
2025 break;
2030 int page_get_flags(target_ulong address)
2032 PageDesc *p;
2034 p = page_find(address >> TARGET_PAGE_BITS);
2035 if (!p)
2036 return 0;
2037 return p->flags;
2040 /* modify the flags of a page and invalidate the code if
2041 necessary. The flag PAGE_WRITE_ORG is set automatically
2042 depending on PAGE_WRITE */
2043 void page_set_flags(target_ulong start, target_ulong end, int flags)
2045 PageDesc *p;
2046 target_ulong addr;
2048 /* mmap_lock should already be held. */
2049 start = start & TARGET_PAGE_MASK;
2050 end = TARGET_PAGE_ALIGN(end);
2051 if (flags & PAGE_WRITE)
2052 flags |= PAGE_WRITE_ORG;
2053 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2054 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2055 /* We may be called for host regions that are outside guest
2056 address space. */
2057 if (!p)
2058 return;
2059 /* if the write protection is set, then we invalidate the code
2060 inside */
2061 if (!(p->flags & PAGE_WRITE) &&
2062 (flags & PAGE_WRITE) &&
2063 p->first_tb) {
2064 tb_invalidate_phys_page(addr, 0, NULL);
2066 p->flags = flags;
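/* Illustrative sketch (not part of the original file): how a caller in the
   user-mode emulation path might mark a freshly mapped guest region.  The
   helper name below is hypothetical; the real callers are the user-mode mmap
   emulation.  Assumes 'prot' holds PAGE_READ/PAGE_WRITE/PAGE_EXEC bits and
   that mmap_lock is already held, as required above. */
static void example_mark_guest_mapping(target_ulong start, target_ulong len,
                                       int prot)
{
    /* every page of a live mapping carries PAGE_VALID in addition to its
       protection bits; page_set_flags() adds PAGE_WRITE_ORG itself when
       PAGE_WRITE is requested */
    page_set_flags(start, start + len, prot | PAGE_VALID);
}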
2070 int page_check_range(target_ulong start, target_ulong len, int flags)
2072 PageDesc *p;
2073 target_ulong end;
2074 target_ulong addr;
2076 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2077 start = start & TARGET_PAGE_MASK;
2079 if( end < start )
2080 /* we've wrapped around */
2081 return -1;
2082 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2083 p = page_find(addr >> TARGET_PAGE_BITS);
2084 if( !p )
2085 return -1;
2086 if( !(p->flags & PAGE_VALID) )
2087 return -1;
2089 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2090 return -1;
2091 if (flags & PAGE_WRITE) {
2092 if (!(p->flags & PAGE_WRITE_ORG))
2093 return -1;
2094 /* unprotect the page if it was put read-only because it
2095 contains translated code */
2096 if (!(p->flags & PAGE_WRITE)) {
2097 if (!page_unprotect(addr, 0, NULL))
2098 return -1;
2100 return 0;
2103 return 0;
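/* Illustrative sketch (not part of the original file): a typical consumer of
   page_check_range() validates a guest buffer before touching it through the
   direct host mapping.  'example_copy_from_guest' is a hypothetical helper;
   g2h() is the guest-virtual-to-host translation already used above. */
static int example_copy_from_guest(void *dst, target_ulong guest_addr,
                                   target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ) < 0)
        return -1;              /* unmapped or not readable */
    memcpy(dst, g2h(guest_addr), len);
    return 0;
}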
2106 /* called from signal handler: invalidate the code and unprotect the
2107 page. Return TRUE if the fault was successfully handled. */
2108 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2110 unsigned int page_index, prot, pindex;
2111 PageDesc *p, *p1;
2112 target_ulong host_start, host_end, addr;
2114 /* Technically this isn't safe inside a signal handler. However we
2115 know this only ever happens in a synchronous SEGV handler, so in
2116 practice it seems to be ok. */
2117 mmap_lock();
2119 host_start = address & qemu_host_page_mask;
2120 page_index = host_start >> TARGET_PAGE_BITS;
2121 p1 = page_find(page_index);
2122 if (!p1) {
2123 mmap_unlock();
2124 return 0;
2126 host_end = host_start + qemu_host_page_size;
2127 p = p1;
2128 prot = 0;
2129 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2130 prot |= p->flags;
2131 p++;
2133 /* if the page was really writable, then we change its
2134 protection back to writable */
2135 if (prot & PAGE_WRITE_ORG) {
2136 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2137 if (!(p1[pindex].flags & PAGE_WRITE)) {
2138 mprotect((void *)g2h(host_start), qemu_host_page_size,
2139 (prot & PAGE_BITS) | PAGE_WRITE);
2140 p1[pindex].flags |= PAGE_WRITE;
2141 /* and since the content will be modified, we must invalidate
2142 the corresponding translated code. */
2143 tb_invalidate_phys_page(address, pc, puc);
2144 #ifdef DEBUG_TB_CHECK
2145 tb_invalidate_check(address);
2146 #endif
2147 mmap_unlock();
2148 return 1;
2151 mmap_unlock();
2152 return 0;
2155 static inline void tlb_set_dirty(CPUState *env,
2156 unsigned long addr, target_ulong vaddr)
2159 #endif /* defined(CONFIG_USER_ONLY) */
2161 #if !defined(CONFIG_USER_ONLY)
2162 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2163 ram_addr_t memory);
2164 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2165 ram_addr_t orig_memory);
2166 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2167 need_subpage) \
2168 do { \
2169 if (addr > start_addr) \
2170 start_addr2 = 0; \
2171 else { \
2172 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2173 if (start_addr2 > 0) \
2174 need_subpage = 1; \
2177 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2178 end_addr2 = TARGET_PAGE_SIZE - 1; \
2179 else { \
2180 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2181 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2182 need_subpage = 1; \
2184 } while (0)
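/* Worked example (added for clarity, not in the original): with 4 KiB pages,
   registering a 0x100-byte region at start_addr 0x10000800 gives, on the
   first loop iteration (addr == start_addr):
     start_addr2 = 0x800 and end_addr2 = 0x8ff, so need_subpage is set and
     only byte offsets 0x800..0x8ff of that page are redirected to the new
     handler; the rest of the page keeps its previous mapping. */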
2186 /* register physical memory. 'size' must be a multiple of the target
2187 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2188 io memory page */
2189 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2190 ram_addr_t size,
2191 ram_addr_t phys_offset)
2193 target_phys_addr_t addr, end_addr;
2194 PhysPageDesc *p;
2195 CPUState *env;
2196 ram_addr_t orig_size = size;
2197 void *subpage;
2199 #ifdef USE_KQEMU
2200 /* XXX: should not depend on cpu context */
2201 env = first_cpu;
2202 if (env->kqemu_enabled) {
2203 kqemu_set_phys_mem(start_addr, size, phys_offset);
2205 #endif
2206 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2207 end_addr = start_addr + (target_phys_addr_t)size;
2208 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2209 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2210 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2211 ram_addr_t orig_memory = p->phys_offset;
2212 target_phys_addr_t start_addr2, end_addr2;
2213 int need_subpage = 0;
2215 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2216 need_subpage);
2217 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2218 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2219 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2220 &p->phys_offset, orig_memory);
2221 } else {
2222 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2223 >> IO_MEM_SHIFT];
2225 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2226 } else {
2227 p->phys_offset = phys_offset;
2228 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2229 (phys_offset & IO_MEM_ROMD))
2230 phys_offset += TARGET_PAGE_SIZE;
2232 } else {
2233 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2234 p->phys_offset = phys_offset;
2235 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2236 (phys_offset & IO_MEM_ROMD))
2237 phys_offset += TARGET_PAGE_SIZE;
2238 else {
2239 target_phys_addr_t start_addr2, end_addr2;
2240 int need_subpage = 0;
2242 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2243 end_addr2, need_subpage);
2245 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2246 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2247 &p->phys_offset, IO_MEM_UNASSIGNED);
2248 subpage_register(subpage, start_addr2, end_addr2,
2249 phys_offset);
2255 /* since each CPU stores ram addresses in its TLB cache, we must
2256 reset the modified entries */
2257 /* XXX: slow ! */
2258 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2259 tlb_flush(env, 1);
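/* Illustrative sketch (not part of the original file): the usual board-level
   pattern that pairs qemu_ram_alloc() with cpu_register_physical_memory().
   The sizes and guest physical addresses below are invented for the example. */
static void example_register_board_memory(void)
{
    ram_addr_t ram_off, rom_off;

    /* 128 MiB of ordinary RAM mapped at guest physical address 0 */
    ram_off = qemu_ram_alloc(128 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 128 * 1024 * 1024,
                                 ram_off | IO_MEM_RAM);

    /* a 64 KiB ROM near the top of the 4 GiB space; the IO_MEM_ROM flag in
       the low bits keeps reads direct while writes take the I/O path */
    rom_off = qemu_ram_alloc(64 * 1024);
    cpu_register_physical_memory(0xffff0000, 64 * 1024,
                                 rom_off | IO_MEM_ROM);
}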
2263 /* XXX: temporary until new memory mapping API */
2264 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2266 PhysPageDesc *p;
2268 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2269 if (!p)
2270 return IO_MEM_UNASSIGNED;
2271 return p->phys_offset;
2274 /* XXX: better than nothing */
2275 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2277 ram_addr_t addr;
2278 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2279 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2280 (uint64_t)size, (uint64_t)phys_ram_size);
2281 abort();
2283 addr = phys_ram_alloc_offset;
2284 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2285 return addr;
2288 void qemu_ram_free(ram_addr_t addr)
2292 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2294 #ifdef DEBUG_UNASSIGNED
2295 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2296 #endif
2297 #ifdef TARGET_SPARC
2298 do_unassigned_access(addr, 0, 0, 0);
2299 #elif defined(TARGET_CRIS)
2300 do_unassigned_access(addr, 0, 0, 0);
2301 #endif
2302 return 0;
2305 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2307 #ifdef DEBUG_UNASSIGNED
2308 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2309 #endif
2310 #ifdef TARGET_SPARC
2311 do_unassigned_access(addr, 1, 0, 0);
2312 #elif defined(TARGET_CRIS)
2313 do_unassigned_access(addr, 1, 0, 0);
2314 #endif
2317 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2318 unassigned_mem_readb,
2319 unassigned_mem_readb,
2320 unassigned_mem_readb,
2323 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2324 unassigned_mem_writeb,
2325 unassigned_mem_writeb,
2326 unassigned_mem_writeb,
2329 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2330 uint32_t val)
2332 int dirty_flags;
2333 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2334 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2335 #if !defined(CONFIG_USER_ONLY)
2336 tb_invalidate_phys_page_fast(ram_addr, 1);
2337 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2338 #endif
2340 stb_p(phys_ram_base + ram_addr, val);
2341 #ifdef USE_KQEMU
2342 if (cpu_single_env->kqemu_enabled &&
2343 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2344 kqemu_modify_page(cpu_single_env, ram_addr);
2345 #endif
2346 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2347 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2348 /* we remove the notdirty callback only if the code has been
2349 flushed */
2350 if (dirty_flags == 0xff)
2351 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2354 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2355 uint32_t val)
2357 int dirty_flags;
2358 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2359 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2360 #if !defined(CONFIG_USER_ONLY)
2361 tb_invalidate_phys_page_fast(ram_addr, 2);
2362 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2363 #endif
2365 stw_p(phys_ram_base + ram_addr, val);
2366 #ifdef USE_KQEMU
2367 if (cpu_single_env->kqemu_enabled &&
2368 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2369 kqemu_modify_page(cpu_single_env, ram_addr);
2370 #endif
2371 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2372 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2373 /* we remove the notdirty callback only if the code has been
2374 flushed */
2375 if (dirty_flags == 0xff)
2376 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2379 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2380 uint32_t val)
2382 int dirty_flags;
2383 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2384 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2385 #if !defined(CONFIG_USER_ONLY)
2386 tb_invalidate_phys_page_fast(ram_addr, 4);
2387 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2388 #endif
2390 stl_p(phys_ram_base + ram_addr, val);
2391 #ifdef USE_KQEMU
2392 if (cpu_single_env->kqemu_enabled &&
2393 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2394 kqemu_modify_page(cpu_single_env, ram_addr);
2395 #endif
2396 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2397 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2398 /* we remove the notdirty callback only if the code has been
2399 flushed */
2400 if (dirty_flags == 0xff)
2401 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2404 static CPUReadMemoryFunc *error_mem_read[3] = {
2405 NULL, /* never used */
2406 NULL, /* never used */
2407 NULL, /* never used */
2410 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2411 notdirty_mem_writeb,
2412 notdirty_mem_writew,
2413 notdirty_mem_writel,
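/* Illustrative sketch (not part of the original file): a consumer of the
   dirty bitmap, e.g. a framebuffer device deciding which guest pages to
   redraw.  It only relies on cpu_physical_memory_is_dirty() and
   TARGET_PAGE_SIZE, both used elsewhere in this file; the helper name and
   the callback are hypothetical. */
static void example_scan_dirty_pages(ram_addr_t start, ram_addr_t size,
                                     void (*redraw)(ram_addr_t page))
{
    ram_addr_t addr;

    for (addr = start; addr < start + size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_dirty(addr))
            redraw(addr);
    }
}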
2416 /* Generate a debug exception if a watchpoint has been hit. */
2417 static void check_watchpoint(int offset, int flags)
2419 CPUState *env = cpu_single_env;
2420 target_ulong vaddr;
2421 int i;
2423 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2424 for (i = 0; i < env->nb_watchpoints; i++) {
2425 if (vaddr == env->watchpoint[i].vaddr
2426 && (env->watchpoint[i].type & flags)) {
2427 env->watchpoint_hit = i + 1;
2428 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2429 break;
2434 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2435 so these check for a hit then pass through to the normal out-of-line
2436 phys routines. */
2437 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2439 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2440 return ldub_phys(addr);
2443 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2445 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2446 return lduw_phys(addr);
2449 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2451 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2452 return ldl_phys(addr);
2455 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2456 uint32_t val)
2458 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2459 stb_phys(addr, val);
2462 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2463 uint32_t val)
2465 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2466 stw_phys(addr, val);
2469 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2470 uint32_t val)
2472 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2473 stl_phys(addr, val);
2476 static CPUReadMemoryFunc *watch_mem_read[3] = {
2477 watch_mem_readb,
2478 watch_mem_readw,
2479 watch_mem_readl,
2482 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2483 watch_mem_writeb,
2484 watch_mem_writew,
2485 watch_mem_writel,
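/* Illustrative sketch (not part of the original file): arming a data
   watchpoint so that the TLB fill code above installs io_mem_watch for the
   page and accesses are funnelled through check_watchpoint().  This mirrors
   what QEMU's regular watchpoint insertion helper does; bounds checking of
   the fixed-size watchpoint array is omitted and 'type' is a mask of
   PAGE_READ/PAGE_WRITE bits, as check_watchpoint() expects. */
static void example_insert_watchpoint(CPUState *env, target_ulong addr,
                                      int type)
{
    int i = env->nb_watchpoints++;

    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    /* force the page to be re-filled so the iotlb entry points at
       io_mem_watch on the next access */
    tlb_flush_page(env, addr);
}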
2488 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2489 unsigned int len)
2491 uint32_t ret;
2492 unsigned int idx;
2494 idx = SUBPAGE_IDX(addr - mmio->base);
2495 #if defined(DEBUG_SUBPAGE)
2496 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2497 mmio, len, addr, idx);
2498 #endif
2499 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2501 return ret;
2504 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2505 uint32_t value, unsigned int len)
2507 unsigned int idx;
2509 idx = SUBPAGE_IDX(addr - mmio->base);
2510 #if defined(DEBUG_SUBPAGE)
2511 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2512 mmio, len, addr, idx, value);
2513 #endif
2514 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2517 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2519 #if defined(DEBUG_SUBPAGE)
2520 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2521 #endif
2523 return subpage_readlen(opaque, addr, 0);
2526 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2527 uint32_t value)
2529 #if defined(DEBUG_SUBPAGE)
2530 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2531 #endif
2532 subpage_writelen(opaque, addr, value, 0);
2535 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2537 #if defined(DEBUG_SUBPAGE)
2538 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2539 #endif
2541 return subpage_readlen(opaque, addr, 1);
2544 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2545 uint32_t value)
2547 #if defined(DEBUG_SUBPAGE)
2548 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2549 #endif
2550 subpage_writelen(opaque, addr, value, 1);
2553 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2555 #if defined(DEBUG_SUBPAGE)
2556 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2557 #endif
2559 return subpage_readlen(opaque, addr, 2);
2562 static void subpage_writel (void *opaque,
2563 target_phys_addr_t addr, uint32_t value)
2565 #if defined(DEBUG_SUBPAGE)
2566 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2567 #endif
2568 subpage_writelen(opaque, addr, value, 2);
2571 static CPUReadMemoryFunc *subpage_read[] = {
2572 &subpage_readb,
2573 &subpage_readw,
2574 &subpage_readl,
2577 static CPUWriteMemoryFunc *subpage_write[] = {
2578 &subpage_writeb,
2579 &subpage_writew,
2580 &subpage_writel,
2583 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2584 ram_addr_t memory)
2586 int idx, eidx;
2587 unsigned int i;
2589 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2590 return -1;
2591 idx = SUBPAGE_IDX(start);
2592 eidx = SUBPAGE_IDX(end);
2593 #if defined(DEBUG_SUBPAGE)
2594 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2595 mmio, start, end, idx, eidx, memory);
2596 #endif
2597 memory >>= IO_MEM_SHIFT;
2598 for (; idx <= eidx; idx++) {
2599 for (i = 0; i < 4; i++) {
2600 if (io_mem_read[memory][i]) {
2601 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2602 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2604 if (io_mem_write[memory][i]) {
2605 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2606 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2611 return 0;
2614 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2615 ram_addr_t orig_memory)
2617 subpage_t *mmio;
2618 int subpage_memory;
2620 mmio = qemu_mallocz(sizeof(subpage_t));
2621 if (mmio != NULL) {
2622 mmio->base = base;
2623 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2624 #if defined(DEBUG_SUBPAGE)
2625 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2626 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2627 #endif
2628 *phys = subpage_memory | IO_MEM_SUBPAGE;
2629 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2632 return mmio;
2635 static int get_free_io_mem_idx(void)
2637 int i;
2639 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2640 if (!io_mem_used[i]) {
2641 io_mem_used[i] = 1;
2642 return i;
2645 return -1;
2648 static void io_mem_init(void)
2650 int i;
2652 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2653 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2654 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2655 for (i=0; i<5; i++)
2656 io_mem_used[i] = 1;
2658 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2659 watch_mem_write, NULL);
2660 /* alloc dirty bits array */
2661 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2662 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2665 /* mem_read and mem_write are arrays of functions containing the
2666 function to access byte (index 0), word (index 1) and dword (index
2667 2). Functions can be omitted with a NULL function pointer. The
2668 registered functions may be modified dynamically later.
2669 If io_index is non-zero, the corresponding io zone is
2670 modified. If it is zero, a new io zone is allocated. The return
2671 value can be used with cpu_register_physical_memory(); (-1) is
2672 returned on error. */
2673 int cpu_register_io_memory(int io_index,
2674 CPUReadMemoryFunc **mem_read,
2675 CPUWriteMemoryFunc **mem_write,
2676 void *opaque)
2678 int i, subwidth = 0;
2680 if (io_index <= 0) {
2681 io_index = get_free_io_mem_idx();
2682 if (io_index == -1)
2683 return io_index;
2684 } else {
2685 if (io_index >= IO_MEM_NB_ENTRIES)
2686 return -1;
2689 for(i = 0;i < 3; i++) {
2690 if (!mem_read[i] || !mem_write[i])
2691 subwidth = IO_MEM_SUBWIDTH;
2692 io_mem_read[io_index][i] = mem_read[i];
2693 io_mem_write[io_index][i] = mem_write[i];
2695 io_mem_opaque[io_index] = opaque;
2696 return (io_index << IO_MEM_SHIFT) | subwidth;
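/* Illustrative sketch (not part of the original file): a minimal MMIO device
   wired up through cpu_register_io_memory().  The device, its single status
   register and the mapping function are invented; the callback prototypes
   follow the CPUReadMemoryFunc/CPUWriteMemoryFunc signatures used throughout
   this file.  NULL entries would be legal too and would tag the slot with
   IO_MEM_SUBWIDTH, as the code above shows. */
typedef struct {
    uint32_t status;
} ExampleDevState;

static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->status;                    /* every offset reads back status */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    ExampleDevState *s = opaque;
    s->status = val;
}

/* same handler for byte/word/dword accesses, as many devices do */
static CPUReadMemoryFunc *example_dev_readfn[3] = {
    example_dev_read,
    example_dev_read,
    example_dev_read,
};

static CPUWriteMemoryFunc *example_dev_writefn[3] = {
    example_dev_write,
    example_dev_write,
    example_dev_write,
};

static void example_dev_map(target_phys_addr_t base, ExampleDevState *s)
{
    /* io_index 0 asks get_free_io_mem_idx() for a fresh slot; the returned
       value is then usable as a phys_offset for the registration call */
    int io = cpu_register_io_memory(0, example_dev_readfn,
                                    example_dev_writefn, s);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}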
2699 void cpu_unregister_io_memory(int io_table_address)
2701 int i;
2702 int io_index = io_table_address >> IO_MEM_SHIFT;
2704 for (i=0;i < 3; i++) {
2705 io_mem_read[io_index][i] = unassigned_mem_read[i];
2706 io_mem_write[io_index][i] = unassigned_mem_write[i];
2708 io_mem_opaque[io_index] = NULL;
2709 io_mem_used[io_index] = 0;
2712 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2714 return io_mem_write[io_index >> IO_MEM_SHIFT];
2717 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2719 return io_mem_read[io_index >> IO_MEM_SHIFT];
2722 #endif /* !defined(CONFIG_USER_ONLY) */
2724 /* physical memory access (slow version, mainly for debug) */
2725 #if defined(CONFIG_USER_ONLY)
2726 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2727 int len, int is_write)
2729 int l, flags;
2730 target_ulong page;
2731 void * p;
2733 while (len > 0) {
2734 page = addr & TARGET_PAGE_MASK;
2735 l = (page + TARGET_PAGE_SIZE) - addr;
2736 if (l > len)
2737 l = len;
2738 flags = page_get_flags(page);
2739 if (!(flags & PAGE_VALID))
2740 return;
2741 if (is_write) {
2742 if (!(flags & PAGE_WRITE))
2743 return;
2744 /* XXX: this code should not depend on lock_user */
2745 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2746 /* FIXME - should this return an error rather than just fail? */
2747 return;
2748 memcpy(p, buf, l);
2749 unlock_user(p, addr, l);
2750 } else {
2751 if (!(flags & PAGE_READ))
2752 return;
2753 /* XXX: this code should not depend on lock_user */
2754 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2755 /* FIXME - should this return an error rather than just fail? */
2756 return;
2757 memcpy(buf, p, l);
2758 unlock_user(p, addr, 0);
2760 len -= l;
2761 buf += l;
2762 addr += l;
2766 #else
2767 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2768 int len, int is_write)
2770 int l, io_index;
2771 uint8_t *ptr;
2772 uint32_t val;
2773 target_phys_addr_t page;
2774 unsigned long pd;
2775 PhysPageDesc *p;
2777 while (len > 0) {
2778 page = addr & TARGET_PAGE_MASK;
2779 l = (page + TARGET_PAGE_SIZE) - addr;
2780 if (l > len)
2781 l = len;
2782 p = phys_page_find(page >> TARGET_PAGE_BITS);
2783 if (!p) {
2784 pd = IO_MEM_UNASSIGNED;
2785 } else {
2786 pd = p->phys_offset;
2789 if (is_write) {
2790 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2791 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2792 /* XXX: could force cpu_single_env to NULL to avoid
2793 potential bugs */
2794 if (l >= 4 && ((addr & 3) == 0)) {
2795 /* 32 bit write access */
2796 val = ldl_p(buf);
2797 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2798 l = 4;
2799 } else if (l >= 2 && ((addr & 1) == 0)) {
2800 /* 16 bit write access */
2801 val = lduw_p(buf);
2802 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2803 l = 2;
2804 } else {
2805 /* 8 bit write access */
2806 val = ldub_p(buf);
2807 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2808 l = 1;
2810 } else {
2811 unsigned long addr1;
2812 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2813 /* RAM case */
2814 ptr = phys_ram_base + addr1;
2815 memcpy(ptr, buf, l);
2816 if (!cpu_physical_memory_is_dirty(addr1)) {
2817 /* invalidate code */
2818 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2819 /* set dirty bit */
2820 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2821 (0xff & ~CODE_DIRTY_FLAG);
2823 /* qemu doesn't execute guest code directly, but kvm does,
2824 therefore flush the instruction cache */
2825 if (kvm_enabled())
2826 flush_icache_range((unsigned long)ptr,
2827 ((unsigned long)ptr)+l);
2829 } else {
2830 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2831 !(pd & IO_MEM_ROMD)) {
2832 /* I/O case */
2833 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2834 if (l >= 4 && ((addr & 3) == 0)) {
2835 /* 32 bit read access */
2836 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2837 stl_p(buf, val);
2838 l = 4;
2839 } else if (l >= 2 && ((addr & 1) == 0)) {
2840 /* 16 bit read access */
2841 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2842 stw_p(buf, val);
2843 l = 2;
2844 } else {
2845 /* 8 bit read access */
2846 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2847 stb_p(buf, val);
2848 l = 1;
2850 } else {
2851 /* RAM case */
2852 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2853 (addr & ~TARGET_PAGE_MASK);
2854 memcpy(buf, ptr, l);
2857 len -= l;
2858 buf += l;
2859 addr += l;
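/* Illustrative sketch (not part of the original file): a DMA-style transfer
   from a device model into guest memory built on the slow path above.  The
   helper name and the 32-bit descriptor layout are invented;
   cpu_physical_memory_write() and ldl_phys() are both used later in this
   file. */
static void example_dma_to_guest(target_phys_addr_t desc_addr,
                                 const uint8_t *src, int len)
{
    uint32_t dst;

    /* fetch a (hypothetical) destination pointer from the descriptor, then
       copy the payload; both calls go through cpu_physical_memory_rw() and
       therefore work for RAM as well as MMIO-backed pages */
    dst = ldl_phys(desc_addr);
    cpu_physical_memory_write(dst, src, len);
}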
2863 /* used for ROM loading : can write in RAM and ROM */
2864 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2865 const uint8_t *buf, int len)
2867 int l;
2868 uint8_t *ptr;
2869 target_phys_addr_t page;
2870 unsigned long pd;
2871 PhysPageDesc *p;
2873 while (len > 0) {
2874 page = addr & TARGET_PAGE_MASK;
2875 l = (page + TARGET_PAGE_SIZE) - addr;
2876 if (l > len)
2877 l = len;
2878 p = phys_page_find(page >> TARGET_PAGE_BITS);
2879 if (!p) {
2880 pd = IO_MEM_UNASSIGNED;
2881 } else {
2882 pd = p->phys_offset;
2885 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2886 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2887 !(pd & IO_MEM_ROMD)) {
2888 /* do nothing */
2889 } else {
2890 unsigned long addr1;
2891 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2892 /* ROM/RAM case */
2893 ptr = phys_ram_base + addr1;
2894 memcpy(ptr, buf, l);
2896 len -= l;
2897 buf += l;
2898 addr += l;
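/* Illustrative sketch (not part of the original file): loading a firmware
   image into a region registered as ROM.  A plain cpu_physical_memory_write()
   would send ROM pages down the I/O path and the bytes would be discarded,
   which is why a loader uses this helper instead.  Buffer and address are
   invented. */
static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *image, int image_size)
{
    cpu_physical_memory_write_rom(rom_base, image, image_size);
}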
2903 /* warning: addr must be aligned */
2904 uint32_t ldl_phys(target_phys_addr_t addr)
2906 int io_index;
2907 uint8_t *ptr;
2908 uint32_t val;
2909 unsigned long pd;
2910 PhysPageDesc *p;
2912 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2913 if (!p) {
2914 pd = IO_MEM_UNASSIGNED;
2915 } else {
2916 pd = p->phys_offset;
2919 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2920 !(pd & IO_MEM_ROMD)) {
2921 /* I/O case */
2922 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2923 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2924 } else {
2925 /* RAM case */
2926 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2927 (addr & ~TARGET_PAGE_MASK);
2928 val = ldl_p(ptr);
2930 return val;
2933 /* warning: addr must be aligned */
2934 uint64_t ldq_phys(target_phys_addr_t addr)
2936 int io_index;
2937 uint8_t *ptr;
2938 uint64_t val;
2939 unsigned long pd;
2940 PhysPageDesc *p;
2942 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2943 if (!p) {
2944 pd = IO_MEM_UNASSIGNED;
2945 } else {
2946 pd = p->phys_offset;
2949 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2950 !(pd & IO_MEM_ROMD)) {
2951 /* I/O case */
2952 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2953 #ifdef TARGET_WORDS_BIGENDIAN
2954 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2955 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2956 #else
2957 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2958 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2959 #endif
2960 } else {
2961 /* RAM case */
2962 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2963 (addr & ~TARGET_PAGE_MASK);
2964 val = ldq_p(ptr);
2966 return val;
2969 /* XXX: optimize */
2970 uint32_t ldub_phys(target_phys_addr_t addr)
2972 uint8_t val;
2973 cpu_physical_memory_read(addr, &val, 1);
2974 return val;
2977 /* XXX: optimize */
2978 uint32_t lduw_phys(target_phys_addr_t addr)
2980 uint16_t val;
2981 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2982 return tswap16(val);
2985 #ifdef __GNUC__
2986 #define likely(x) __builtin_expect(!!(x), 1)
2987 #define unlikely(x) __builtin_expect(!!(x), 0)
2988 #else
2989 #define likely(x) x
2990 #define unlikely(x) x
2991 #endif
2993 /* warning: addr must be aligned. The ram page is not masked as dirty
2994 and the code inside is not invalidated. It is useful if the dirty
2995 bits are used to track modified PTEs */
2996 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2998 int io_index;
2999 uint8_t *ptr;
3000 unsigned long pd;
3001 PhysPageDesc *p;
3003 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3004 if (!p) {
3005 pd = IO_MEM_UNASSIGNED;
3006 } else {
3007 pd = p->phys_offset;
3010 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3011 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3012 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3013 } else {
3014 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3015 ptr = phys_ram_base + addr1;
3016 stl_p(ptr, val);
3018 if (unlikely(in_migration)) {
3019 if (!cpu_physical_memory_is_dirty(addr1)) {
3020 /* invalidate code */
3021 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3022 /* set dirty bit */
3023 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3024 (0xff & ~CODE_DIRTY_FLAG);
3030 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3032 int io_index;
3033 uint8_t *ptr;
3034 unsigned long pd;
3035 PhysPageDesc *p;
3037 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3038 if (!p) {
3039 pd = IO_MEM_UNASSIGNED;
3040 } else {
3041 pd = p->phys_offset;
3044 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3045 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3046 #ifdef TARGET_WORDS_BIGENDIAN
3047 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3048 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3049 #else
3050 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3051 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3052 #endif
3053 } else {
3054 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3055 (addr & ~TARGET_PAGE_MASK);
3056 stq_p(ptr, val);
3060 /* warning: addr must be aligned */
3061 void stl_phys(target_phys_addr_t addr, uint32_t val)
3063 int io_index;
3064 uint8_t *ptr;
3065 unsigned long pd;
3066 PhysPageDesc *p;
3068 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3069 if (!p) {
3070 pd = IO_MEM_UNASSIGNED;
3071 } else {
3072 pd = p->phys_offset;
3075 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3076 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3077 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3078 } else {
3079 unsigned long addr1;
3080 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3081 /* RAM case */
3082 ptr = phys_ram_base + addr1;
3083 stl_p(ptr, val);
3084 if (!cpu_physical_memory_is_dirty(addr1)) {
3085 /* invalidate code */
3086 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3087 /* set dirty bit */
3088 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3089 (0xff & ~CODE_DIRTY_FLAG);
3094 /* XXX: optimize */
3095 void stb_phys(target_phys_addr_t addr, uint32_t val)
3097 uint8_t v = val;
3098 cpu_physical_memory_write(addr, &v, 1);
3101 /* XXX: optimize */
3102 void stw_phys(target_phys_addr_t addr, uint32_t val)
3104 uint16_t v = tswap16(val);
3105 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3108 /* XXX: optimize */
3109 void stq_phys(target_phys_addr_t addr, uint64_t val)
3111 val = tswap64(val);
3112 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
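/* Illustrative sketch (not part of the original file): the ld*_phys/st*_phys
   helpers suit small, aligned accesses to guest-physical structures, e.g. a
   target MMU helper updating a bit in a page table entry.  The PTE layout is
   invented; the point is that stl_phys_notdirty() avoids marking the page
   dirty for code invalidation, as described above. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr,
                                     uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & accessed_bit))
        stl_phys_notdirty(pte_addr, pte | accessed_bit);
}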
3115 #endif
3117 /* virtual memory access for debug */
3118 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3119 uint8_t *buf, int len, int is_write)
3121 int l;
3122 target_phys_addr_t phys_addr;
3123 target_ulong page;
3125 while (len > 0) {
3126 page = addr & TARGET_PAGE_MASK;
3127 phys_addr = cpu_get_phys_page_debug(env, page);
3128 /* if no physical page mapped, return an error */
3129 if (phys_addr == -1)
3130 return -1;
3131 l = (page + TARGET_PAGE_SIZE) - addr;
3132 if (l > len)
3133 l = len;
3134 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3135 buf, l, is_write);
3136 len -= l;
3137 buf += l;
3138 addr += l;
3140 return 0;
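/* Illustrative sketch (not part of the original file): the typical gdbstub /
   monitor use of cpu_memory_rw_debug() -- reading guest-virtual memory
   without going through the TLB fast path.  The helper name is invented. */
static int example_peek_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;               /* no physical page mapped */
    *out = ldl_p(buf);           /* target-order load from the bounce buffer */
    return 0;
}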
3143 /* in deterministic execution mode, instructions doing device I/Os
3144 must be at the end of the TB */
3145 void cpu_io_recompile(CPUState *env, void *retaddr)
3147 TranslationBlock *tb;
3148 uint32_t n, cflags;
3149 target_ulong pc, cs_base;
3150 uint64_t flags;
3152 tb = tb_find_pc((unsigned long)retaddr);
3153 if (!tb) {
3154 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3155 retaddr);
3157 n = env->icount_decr.u16.low + tb->icount;
3158 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3159 /* Calculate how many instructions had been executed before the fault
3160 occurred. */
3161 n = n - env->icount_decr.u16.low;
3162 /* Generate a new TB ending on the I/O insn. */
3163 n++;
3164 /* On MIPS and SH, delay slot instructions can only be restarted if
3165 they were already the first instruction in the TB. If this is not
3166 the first instruction in a TB then re-execute the preceding
3167 branch. */
3168 #if defined(TARGET_MIPS)
3169 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3170 env->active_tc.PC -= 4;
3171 env->icount_decr.u16.low++;
3172 env->hflags &= ~MIPS_HFLAG_BMASK;
3174 #elif defined(TARGET_SH4)
3175 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3176 && n > 1) {
3177 env->pc -= 2;
3178 env->icount_decr.u16.low++;
3179 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3181 #endif
3182 /* This should never happen. */
3183 if (n > CF_COUNT_MASK)
3184 cpu_abort(env, "TB too big during recompile");
3186 cflags = n | CF_LAST_IO;
3187 pc = tb->pc;
3188 cs_base = tb->cs_base;
3189 flags = tb->flags;
3190 tb_phys_invalidate(tb, -1);
3191 /* FIXME: In theory this could raise an exception. In practice
3192 we have already translated the block once so it's probably ok. */
3193 tb_gen_code(env, pc, cs_base, flags, cflags);
3194 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3195 the first in the TB) then we end up generating a whole new TB and
3196 repeating the fault, which is horribly inefficient.
3197 Better would be to execute just this insn uncached, or generate a
3198 second new TB. */
3199 cpu_resume_from_signal(env, NULL);
3202 void dump_exec_info(FILE *f,
3203 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3205 int i, target_code_size, max_target_code_size;
3206 int direct_jmp_count, direct_jmp2_count, cross_page;
3207 TranslationBlock *tb;
3209 target_code_size = 0;
3210 max_target_code_size = 0;
3211 cross_page = 0;
3212 direct_jmp_count = 0;
3213 direct_jmp2_count = 0;
3214 for(i = 0; i < nb_tbs; i++) {
3215 tb = &tbs[i];
3216 target_code_size += tb->size;
3217 if (tb->size > max_target_code_size)
3218 max_target_code_size = tb->size;
3219 if (tb->page_addr[1] != -1)
3220 cross_page++;
3221 if (tb->tb_next_offset[0] != 0xffff) {
3222 direct_jmp_count++;
3223 if (tb->tb_next_offset[1] != 0xffff) {
3224 direct_jmp2_count++;
3228 /* XXX: avoid using doubles ? */
3229 cpu_fprintf(f, "Translation buffer state:\n");
3230 cpu_fprintf(f, "gen code size %ld/%ld\n",
3231 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3232 cpu_fprintf(f, "TB count %d/%d\n",
3233 nb_tbs, code_gen_max_blocks);
3234 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3235 nb_tbs ? target_code_size / nb_tbs : 0,
3236 max_target_code_size);
3237 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3238 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3239 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3240 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3241 cross_page,
3242 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3243 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3244 direct_jmp_count,
3245 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3246 direct_jmp2_count,
3247 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3248 cpu_fprintf(f, "\nStatistics:\n");
3249 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3250 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3251 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3252 tcg_dump_info(f, cpu_fprintf);
3255 #if !defined(CONFIG_USER_ONLY)
3257 #define MMUSUFFIX _cmmu
3258 #define GETPC() NULL
3259 #define env cpu_single_env
3260 #define SOFTMMU_CODE_ACCESS
3262 #define SHIFT 0
3263 #include "softmmu_template.h"
3265 #define SHIFT 1
3266 #include "softmmu_template.h"
3268 #define SHIFT 2
3269 #include "softmmu_template.h"
3271 #define SHIFT 3
3272 #include "softmmu_template.h"
3274 #undef env
3276 #endif