[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #if !defined(TARGET_IA64)
41 #include "tcg.h"
42 #endif
43 #include "qemu-kvm.h"
45 #include "hw/hw.h"
46 #include "osdep.h"
47 #include "kvm.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #endif
52 //#define DEBUG_TB_INVALIDATE
53 //#define DEBUG_FLUSH
54 //#define DEBUG_TLB
55 //#define DEBUG_UNASSIGNED
57 /* make various TB consistency checks */
58 //#define DEBUG_TB_CHECK
59 //#define DEBUG_TLB_CHECK
61 //#define DEBUG_IOPORT
62 //#define DEBUG_SUBPAGE
64 #if !defined(CONFIG_USER_ONLY)
65 /* TB consistency checks only implemented for usermode emulation. */
66 #undef DEBUG_TB_CHECK
67 #endif
69 #define SMC_BITMAP_USE_THRESHOLD 10
71 #if defined(TARGET_SPARC64)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 41
73 #elif defined(TARGET_SPARC)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 36
75 #elif defined(TARGET_ALPHA)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #define TARGET_VIRT_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_PPC64)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 42
82 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
83 #define TARGET_PHYS_ADDR_SPACE_BITS 36
84 #elif defined(TARGET_IA64)
85 #define TARGET_PHYS_ADDR_SPACE_BITS 36
86 #else
87 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
88 #define TARGET_PHYS_ADDR_SPACE_BITS 32
89 #endif
91 static TranslationBlock *tbs;
92 int code_gen_max_blocks;
93 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
94 static int nb_tbs;
95 /* any access to the tbs or the page table must use this lock */
96 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
98 #if defined(__arm__) || defined(__sparc_v9__)
99 /* The prologue must be reachable with a direct jump. ARM and Sparc64
100 have limited branch ranges (possibly also PPC) so place it in a
101 section close to the code segment. */
102 #define code_gen_section \
103 __attribute__((__section__(".gen_code"))) \
104 __attribute__((aligned (32)))
105 #else
106 #define code_gen_section \
107 __attribute__((aligned (32)))
108 #endif
110 uint8_t code_gen_prologue[1024] code_gen_section;
111 static uint8_t *code_gen_buffer;
112 static unsigned long code_gen_buffer_size;
113 /* threshold to flush the translated code buffer */
114 static unsigned long code_gen_buffer_max_size;
115 uint8_t *code_gen_ptr;
117 #if !defined(CONFIG_USER_ONLY)
118 ram_addr_t phys_ram_size;
119 int phys_ram_fd;
120 uint8_t *phys_ram_base;
121 uint8_t *phys_ram_dirty;
122 uint8_t *bios_mem;
123 static int in_migration;
124 static ram_addr_t phys_ram_alloc_offset = 0;
125 #endif
127 CPUState *first_cpu;
128 /* current CPU in the current thread. It is only valid inside
129 cpu_exec() */
130 CPUState *cpu_single_env;
131 /* 0 = Do not count executed instructions.
132 1 = Precise instruction counting.
133 2 = Adaptive rate instruction counting. */
134 int use_icount = 0;
135 /* Current instruction counter. While executing translated code this may
136 include some instructions that have not yet been executed. */
137 int64_t qemu_icount;
139 typedef struct PageDesc {
140 /* list of TBs intersecting this ram page */
141 TranslationBlock *first_tb;
142 /* in order to optimize self modifying code, we count the number
143 of lookups we do to a given page to use a bitmap */
144 unsigned int code_write_count;
145 uint8_t *code_bitmap;
146 #if defined(CONFIG_USER_ONLY)
147 unsigned long flags;
148 #endif
149 } PageDesc;
151 typedef struct PhysPageDesc {
152 /* offset in host memory of the page + io_index in the low bits */
153 ram_addr_t phys_offset;
154 ram_addr_t region_offset;
155 } PhysPageDesc;
157 #define L2_BITS 10
158 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
159 /* XXX: this is a temporary hack for alpha target.
160 * In the future, this is to be replaced by a multi-level table
161 * to actually be able to handle the complete 64-bit address space. */
163 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
164 #else
165 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
166 #endif
168 #define L1_SIZE (1 << L1_BITS)
169 #define L2_SIZE (1 << L2_BITS)
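/* With the usual TARGET_PAGE_BITS of 12 (4 KiB pages) and L2_BITS of 10,
L1_BITS comes out to 10, so both levels hold 1024 entries and together
cover the whole 32-bit (or TARGET_VIRT_ADDR_SPACE_BITS) range: a page
index is split into l1_map[index >> L2_BITS] for the first level and
(index & (L2_SIZE - 1)) inside the second-level array. */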
171 unsigned long qemu_real_host_page_size;
172 unsigned long qemu_host_page_bits;
173 unsigned long qemu_host_page_size;
174 unsigned long qemu_host_page_mask;
176 /* XXX: for system emulation, it could just be an array */
177 static PageDesc *l1_map[L1_SIZE];
178 static PhysPageDesc **l1_phys_map;
180 #if !defined(CONFIG_USER_ONLY)
181 static void io_mem_init(void);
183 /* io memory support */
184 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
185 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
186 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
187 static char io_mem_used[IO_MEM_NB_ENTRIES];
188 static int io_mem_watch;
189 #endif
191 /* log support */
192 static const char *logfilename = "/tmp/qemu.log";
193 FILE *logfile;
194 int loglevel;
195 static int log_append = 0;
197 /* statistics */
198 static int tlb_flush_count;
199 static int tb_flush_count;
200 static int tb_phys_invalidate_count;
202 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
203 typedef struct subpage_t {
204 target_phys_addr_t base;
205 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
206 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
207 void *opaque[TARGET_PAGE_SIZE][2][4];
208 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
209 } subpage_t;
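/* map_exec() below makes a host range executable: PAGE_EXECUTE_READWRITE
via VirtualProtect() on Windows, PROT_READ|WRITE|EXEC via mprotect()
elsewhere. The mprotect() variant first widens the range to host page
boundaries, so with 4 KiB pages, for example, addr = 0x1234 and
size = 0x100 end up covering 0x1000..0x2000. */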
211 #ifdef _WIN32
212 static void map_exec(void *addr, long size)
214 DWORD old_protect;
215 VirtualProtect(addr, size,
216 PAGE_EXECUTE_READWRITE, &old_protect);
219 #else
220 static void map_exec(void *addr, long size)
222 unsigned long start, end, page_size;
224 page_size = getpagesize();
225 start = (unsigned long)addr;
226 start &= ~(page_size - 1);
228 end = (unsigned long)addr + size;
229 end += page_size - 1;
230 end &= ~(page_size - 1);
232 mprotect((void *)start, end - start,
233 PROT_READ | PROT_WRITE | PROT_EXEC);
235 #endif
237 static void page_init(void)
239 /* NOTE: we can always suppose that qemu_host_page_size >=
240 TARGET_PAGE_SIZE */
241 #ifdef _WIN32
243 SYSTEM_INFO system_info;
245 GetSystemInfo(&system_info);
246 qemu_real_host_page_size = system_info.dwPageSize;
248 #else
249 qemu_real_host_page_size = getpagesize();
250 #endif
251 if (qemu_host_page_size == 0)
252 qemu_host_page_size = qemu_real_host_page_size;
253 if (qemu_host_page_size < TARGET_PAGE_SIZE)
254 qemu_host_page_size = TARGET_PAGE_SIZE;
255 qemu_host_page_bits = 0;
256 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
257 qemu_host_page_bits++;
258 qemu_host_page_mask = ~(qemu_host_page_size - 1);
259 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
260 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
264 long long startaddr, endaddr;
265 FILE *f;
266 int n;
268 mmap_lock();
269 last_brk = (unsigned long)sbrk(0);
270 f = fopen("/proc/self/maps", "r");
271 if (f) {
272 do {
273 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
274 if (n == 2) {
275 startaddr = MIN(startaddr,
276 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
277 endaddr = MIN(endaddr,
278 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
279 page_set_flags(startaddr & TARGET_PAGE_MASK,
280 TARGET_PAGE_ALIGN(endaddr),
281 PAGE_RESERVED);
283 } while (!feof(f));
284 fclose(f);
286 mmap_unlock();
288 #endif
291 static inline PageDesc **page_l1_map(target_ulong index)
293 #if TARGET_LONG_BITS > 32
294 /* Host memory outside guest VM. For 32-bit targets we have already
295 excluded high addresses. */
296 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
297 return NULL;
298 #endif
299 return &l1_map[index >> L2_BITS];
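/* page_find_alloc() returns the PageDesc for a target page index and
lazily allocates the whole second-level block on first use. In user mode
the block comes from mmap() rather than qemu_malloc() to avoid recursion,
and is marked PAGE_RESERVED if it happens to land inside the guest
address space. */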
302 static inline PageDesc *page_find_alloc(target_ulong index)
304 PageDesc **lp, *p;
305 lp = page_l1_map(index);
306 if (!lp)
307 return NULL;
309 p = *lp;
310 if (!p) {
311 /* allocate if not found */
312 #if defined(CONFIG_USER_ONLY)
313 size_t len = sizeof(PageDesc) * L2_SIZE;
314 /* Don't use qemu_malloc because it may recurse. */
315 p = mmap(0, len, PROT_READ | PROT_WRITE,
316 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
317 *lp = p;
318 if (h2g_valid(p)) {
319 unsigned long addr = h2g(p);
320 page_set_flags(addr & TARGET_PAGE_MASK,
321 TARGET_PAGE_ALIGN(addr + len),
322 PAGE_RESERVED);
324 #else
325 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
326 *lp = p;
327 #endif
329 return p + (index & (L2_SIZE - 1));
332 static inline PageDesc *page_find(target_ulong index)
334 PageDesc **lp, *p;
335 lp = page_l1_map(index);
336 if (!lp)
337 return NULL;
339 p = *lp;
340 if (!p)
341 return 0;
342 return p + (index & (L2_SIZE - 1));
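/* phys_page_find_alloc() is the same lookup for the physical page table
rooted at l1_phys_map, with one extra top level when
TARGET_PHYS_ADDR_SPACE_BITS exceeds 32. Fresh entries start out as
IO_MEM_UNASSIGNED, with region_offset preset to the page's own physical
address. */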
345 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
347 void **lp, **p;
348 PhysPageDesc *pd;
350 p = (void **)l1_phys_map;
351 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
353 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
354 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
355 #endif
356 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
357 p = *lp;
358 if (!p) {
359 /* allocate if not found */
360 if (!alloc)
361 return NULL;
362 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
363 memset(p, 0, sizeof(void *) * L1_SIZE);
364 *lp = p;
366 #endif
367 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
368 pd = *lp;
369 if (!pd) {
370 int i;
371 /* allocate if not found */
372 if (!alloc)
373 return NULL;
374 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
375 *lp = pd;
376 for (i = 0; i < L2_SIZE; i++) {
377 pd[i].phys_offset = IO_MEM_UNASSIGNED;
378 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
381 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
384 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386 return phys_page_find_alloc(index, 0);
389 #if !defined(CONFIG_USER_ONLY)
390 static void tlb_protect_code(ram_addr_t ram_addr);
391 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
392 target_ulong vaddr);
393 #define mmap_lock() do { } while(0)
394 #define mmap_unlock() do { } while(0)
395 #endif
397 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399 #if defined(CONFIG_USER_ONLY)
400 /* Currently it is not recommended to allocate big chunks of data in
401 user mode. This will change when a dedicated libc is used */
402 #define USE_STATIC_CODE_GEN_BUFFER
403 #endif
405 #ifdef USE_STATIC_CODE_GEN_BUFFER
406 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
407 #endif
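/* code_gen_alloc() reserves the host buffer TCG translates into. When KVM
is enabled it returns early, since no code is generated in that case.
Otherwise the size defaults to phys_ram_size/4 in system mode and the
buffer is placed where generated code can reach the prologue with direct
branches, e.g. within the low 2 GB on x86_64 via MAP_32BIT. */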
409 static void code_gen_alloc(unsigned long tb_size)
411 if (kvm_enabled())
412 return;
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 code_gen_buffer = static_code_gen_buffer;
416 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
417 map_exec(code_gen_buffer, code_gen_buffer_size);
418 #else
419 code_gen_buffer_size = tb_size;
420 if (code_gen_buffer_size == 0) {
421 #if defined(CONFIG_USER_ONLY)
422 /* in user mode, phys_ram_size is not meaningful */
423 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
424 #else
425 /* XXX: needs adjustments */
426 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
427 #endif
429 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
430 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
431 /* The code gen buffer location may have constraints depending on
432 the host cpu and OS */
433 #if defined(__linux__)
435 int flags;
436 void *start = NULL;
438 flags = MAP_PRIVATE | MAP_ANONYMOUS;
439 #if defined(__x86_64__)
440 flags |= MAP_32BIT;
441 /* Cannot map more than that */
442 if (code_gen_buffer_size > (800 * 1024 * 1024))
443 code_gen_buffer_size = (800 * 1024 * 1024);
444 #elif defined(__sparc_v9__)
445 // Map the buffer below 2G, so we can use direct calls and branches
446 flags |= MAP_FIXED;
447 start = (void *) 0x60000000UL;
448 if (code_gen_buffer_size > (512 * 1024 * 1024))
449 code_gen_buffer_size = (512 * 1024 * 1024);
450 #elif defined(__arm__)
451 /* Map the buffer below 32M, so we can use direct calls and branches */
452 flags |= MAP_FIXED;
453 start = (void *) 0x01000000UL;
454 if (code_gen_buffer_size > 16 * 1024 * 1024)
455 code_gen_buffer_size = 16 * 1024 * 1024;
456 #endif
457 code_gen_buffer = mmap(start, code_gen_buffer_size,
458 PROT_WRITE | PROT_READ | PROT_EXEC,
459 flags, -1, 0);
460 if (code_gen_buffer == MAP_FAILED) {
461 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
462 exit(1);
465 #elif defined(__FreeBSD__) || defined(__DragonFly__)
467 int flags;
468 void *addr = NULL;
469 flags = MAP_PRIVATE | MAP_ANONYMOUS;
470 #if defined(__x86_64__)
471 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
472 * 0x40000000 is free */
473 flags |= MAP_FIXED;
474 addr = (void *)0x40000000;
475 /* Cannot map more than that */
476 if (code_gen_buffer_size > (800 * 1024 * 1024))
477 code_gen_buffer_size = (800 * 1024 * 1024);
478 #endif
479 code_gen_buffer = mmap(addr, code_gen_buffer_size,
480 PROT_WRITE | PROT_READ | PROT_EXEC,
481 flags, -1, 0);
482 if (code_gen_buffer == MAP_FAILED) {
483 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
484 exit(1);
487 #else
488 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
489 map_exec(code_gen_buffer, code_gen_buffer_size);
490 #endif
491 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
492 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
493 code_gen_buffer_max_size = code_gen_buffer_size -
494 code_gen_max_block_size();
495 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
496 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
499 /* Must be called before using the QEMU cpus. 'tb_size' is the size
500 (in bytes) allocated to the translation buffer. Zero means default
501 size. */
502 void cpu_exec_init_all(unsigned long tb_size)
504 cpu_gen_init();
505 code_gen_alloc(tb_size);
506 code_gen_ptr = code_gen_buffer;
507 page_init();
508 #if !defined(CONFIG_USER_ONLY)
509 io_mem_init();
510 #endif
513 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
515 #define CPU_COMMON_SAVE_VERSION 1
517 static void cpu_common_save(QEMUFile *f, void *opaque)
519 CPUState *env = opaque;
521 qemu_put_be32s(f, &env->halted);
522 qemu_put_be32s(f, &env->interrupt_request);
525 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
527 CPUState *env = opaque;
529 if (version_id != CPU_COMMON_SAVE_VERSION)
530 return -EINVAL;
532 qemu_get_be32s(f, &env->halted);
533 qemu_get_be32s(f, &env->interrupt_request);
534 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
535 version_id is increased. */
536 env->interrupt_request &= ~0x01;
537 tlb_flush(env, 1);
539 return 0;
541 #endif
543 void cpu_exec_init(CPUState *env)
545 CPUState **penv;
546 int cpu_index;
548 #if defined(CONFIG_USER_ONLY)
549 cpu_list_lock();
550 #endif
551 env->next_cpu = NULL;
552 penv = &first_cpu;
553 cpu_index = 0;
554 while (*penv != NULL) {
555 penv = (CPUState **)&(*penv)->next_cpu;
556 cpu_index++;
558 env->cpu_index = cpu_index;
559 TAILQ_INIT(&env->breakpoints);
560 TAILQ_INIT(&env->watchpoints);
561 #ifdef __WIN32
562 env->thread_id = GetCurrentProcessId();
563 #else
564 env->thread_id = getpid();
565 #endif
566 *penv = env;
567 #if defined(CONFIG_USER_ONLY)
568 cpu_list_unlock();
569 #endif
570 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
571 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
572 cpu_common_save, cpu_common_load, env);
573 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
574 cpu_save, cpu_load, env);
575 #endif
578 static inline void invalidate_page_bitmap(PageDesc *p)
580 if (p->code_bitmap) {
581 qemu_free(p->code_bitmap);
582 p->code_bitmap = NULL;
584 p->code_write_count = 0;
587 /* set to NULL all the 'first_tb' fields in all PageDescs */
588 static void page_flush_tb(void)
590 int i, j;
591 PageDesc *p;
593 for(i = 0; i < L1_SIZE; i++) {
594 p = l1_map[i];
595 if (p) {
596 for(j = 0; j < L2_SIZE; j++) {
597 p->first_tb = NULL;
598 invalidate_page_bitmap(p);
599 p++;
605 /* flush all the translation blocks */
606 /* XXX: tb_flush is currently not thread safe */
607 void tb_flush(CPUState *env1)
609 CPUState *env;
610 #if defined(DEBUG_FLUSH)
611 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
612 (unsigned long)(code_gen_ptr - code_gen_buffer),
613 nb_tbs, nb_tbs > 0 ?
614 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
615 #endif
616 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
617 cpu_abort(env1, "Internal error: code buffer overflow\n");
619 nb_tbs = 0;
621 for(env = first_cpu; env != NULL; env = env->next_cpu) {
622 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
625 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
626 page_flush_tb();
628 code_gen_ptr = code_gen_buffer;
629 /* XXX: flush processor icache at this point if cache flush is
630 expensive */
631 tb_flush_count++;
634 #ifdef DEBUG_TB_CHECK
636 static void tb_invalidate_check(target_ulong address)
638 TranslationBlock *tb;
639 int i;
640 address &= TARGET_PAGE_MASK;
641 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
642 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
643 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
644 address >= tb->pc + tb->size)) {
645 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
646 address, (long)tb->pc, tb->size);
652 /* verify that all the pages have correct rights for code */
653 static void tb_page_check(void)
655 TranslationBlock *tb;
656 int i, flags1, flags2;
658 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
659 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
660 flags1 = page_get_flags(tb->pc);
661 flags2 = page_get_flags(tb->pc + tb->size - 1);
662 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
663 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
664 (long)tb->pc, tb->size, flags1, flags2);
670 static void tb_jmp_check(TranslationBlock *tb)
672 TranslationBlock *tb1;
673 unsigned int n1;
675 /* suppress any remaining jumps to this TB */
676 tb1 = tb->jmp_first;
677 for(;;) {
678 n1 = (long)tb1 & 3;
679 tb1 = (TranslationBlock *)((long)tb1 & ~3);
680 if (n1 == 2)
681 break;
682 tb1 = tb1->jmp_next[n1];
684 /* check end of list */
685 if (tb1 != tb) {
686 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
690 #endif
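/* The TB list pointers used below carry state in their low two bits: links
in a page's first_tb/page_next chain are tagged with 0 or 1 to say which
of the TB's (at most two) pages the link belongs to, and the value 2
marks the end of the circular jmp_first/jmp_next lists (a TB starts life
with jmp_first = tb | 2). Hence the recurring
n = (long)tb & 3; tb = tb & ~3 pattern. */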
692 /* invalidate one TB */
693 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
694 int next_offset)
696 TranslationBlock *tb1;
697 for(;;) {
698 tb1 = *ptb;
699 if (tb1 == tb) {
700 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
701 break;
703 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
707 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
709 TranslationBlock *tb1;
710 unsigned int n1;
712 for(;;) {
713 tb1 = *ptb;
714 n1 = (long)tb1 & 3;
715 tb1 = (TranslationBlock *)((long)tb1 & ~3);
716 if (tb1 == tb) {
717 *ptb = tb1->page_next[n1];
718 break;
720 ptb = &tb1->page_next[n1];
724 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
726 TranslationBlock *tb1, **ptb;
727 unsigned int n1;
729 ptb = &tb->jmp_next[n];
730 tb1 = *ptb;
731 if (tb1) {
732 /* find tb(n) in circular list */
733 for(;;) {
734 tb1 = *ptb;
735 n1 = (long)tb1 & 3;
736 tb1 = (TranslationBlock *)((long)tb1 & ~3);
737 if (n1 == n && tb1 == tb)
738 break;
739 if (n1 == 2) {
740 ptb = &tb1->jmp_first;
741 } else {
742 ptb = &tb1->jmp_next[n1];
745 /* now we can suppress tb(n) from the list */
746 *ptb = tb->jmp_next[n];
748 tb->jmp_next[n] = NULL;
752 /* reset the jump entry 'n' of a TB so that it is not chained to
753 another TB */
754 static inline void tb_reset_jump(TranslationBlock *tb, int n)
756 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
759 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
761 CPUState *env;
762 PageDesc *p;
763 unsigned int h, n1;
764 target_phys_addr_t phys_pc;
765 TranslationBlock *tb1, *tb2;
767 /* remove the TB from the hash list */
768 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
769 h = tb_phys_hash_func(phys_pc);
770 tb_remove(&tb_phys_hash[h], tb,
771 offsetof(TranslationBlock, phys_hash_next));
773 /* remove the TB from the page list */
774 if (tb->page_addr[0] != page_addr) {
775 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
776 tb_page_remove(&p->first_tb, tb);
777 invalidate_page_bitmap(p);
779 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
780 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
781 tb_page_remove(&p->first_tb, tb);
782 invalidate_page_bitmap(p);
785 tb_invalidated_flag = 1;
787 /* remove the TB from the hash list */
788 h = tb_jmp_cache_hash_func(tb->pc);
789 for(env = first_cpu; env != NULL; env = env->next_cpu) {
790 if (env->tb_jmp_cache[h] == tb)
791 env->tb_jmp_cache[h] = NULL;
794 /* suppress this TB from the two jump lists */
795 tb_jmp_remove(tb, 0);
796 tb_jmp_remove(tb, 1);
798 /* suppress any remaining jumps to this TB */
799 tb1 = tb->jmp_first;
800 for(;;) {
801 n1 = (long)tb1 & 3;
802 if (n1 == 2)
803 break;
804 tb1 = (TranslationBlock *)((long)tb1 & ~3);
805 tb2 = tb1->jmp_next[n1];
806 tb_reset_jump(tb1, n1);
807 tb1->jmp_next[n1] = NULL;
808 tb1 = tb2;
810 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
812 tb_phys_invalidate_count++;
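/* set_bits() sets bits [start, start + len) of a bit array stored LSB
first, e.g. set_bits(tab, 5, 6) sets bits 5-7 of tab[0] and bits 0-2 of
tab[1]. build_page_bitmap() uses it to record which bytes of a page are
covered by translated code. */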
815 static inline void set_bits(uint8_t *tab, int start, int len)
817 int end, mask, end1;
819 end = start + len;
820 tab += start >> 3;
821 mask = 0xff << (start & 7);
822 if ((start & ~7) == (end & ~7)) {
823 if (start < end) {
824 mask &= ~(0xff << (end & 7));
825 *tab |= mask;
827 } else {
828 *tab++ |= mask;
829 start = (start + 8) & ~7;
830 end1 = end & ~7;
831 while (start < end1) {
832 *tab++ = 0xff;
833 start += 8;
835 if (start < end) {
836 mask = ~(0xff << (end & 7));
837 *tab |= mask;
842 static void build_page_bitmap(PageDesc *p)
844 int n, tb_start, tb_end;
845 TranslationBlock *tb;
847 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
849 tb = p->first_tb;
850 while (tb != NULL) {
851 n = (long)tb & 3;
852 tb = (TranslationBlock *)((long)tb & ~3);
853 /* NOTE: this is subtle as a TB may span two physical pages */
854 if (n == 0) {
855 /* NOTE: tb_end may be after the end of the page, but
856 it is not a problem */
857 tb_start = tb->pc & ~TARGET_PAGE_MASK;
858 tb_end = tb_start + tb->size;
859 if (tb_end > TARGET_PAGE_SIZE)
860 tb_end = TARGET_PAGE_SIZE;
861 } else {
862 tb_start = 0;
863 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
865 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
866 tb = tb->page_next[n];
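/* tb_gen_code() translates one guest block at pc/cs_base/flags into host
code at code_gen_ptr. If tb_alloc() fails, the whole cache is flushed and
the allocation retried; the new TB is then linked into the physical page
tables, with phys_page2 recording the second page when the block straddles
a page boundary. */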
870 TranslationBlock *tb_gen_code(CPUState *env,
871 target_ulong pc, target_ulong cs_base,
872 int flags, int cflags)
874 TranslationBlock *tb;
875 uint8_t *tc_ptr;
876 target_ulong phys_pc, phys_page2, virt_page2;
877 int code_gen_size;
879 phys_pc = get_phys_addr_code(env, pc);
880 tb = tb_alloc(pc);
881 if (!tb) {
882 /* flush must be done */
883 tb_flush(env);
884 /* cannot fail at this point */
885 tb = tb_alloc(pc);
886 /* Don't forget to invalidate previous TB info. */
887 tb_invalidated_flag = 1;
889 tc_ptr = code_gen_ptr;
890 tb->tc_ptr = tc_ptr;
891 tb->cs_base = cs_base;
892 tb->flags = flags;
893 tb->cflags = cflags;
894 cpu_gen_code(env, tb, &code_gen_size);
895 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
897 /* check next page if needed */
898 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
899 phys_page2 = -1;
900 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
901 phys_page2 = get_phys_addr_code(env, virt_page2);
903 tb_link_phys(tb, phys_pc, phys_page2);
904 return tb;
907 /* invalidate all TBs which intersect with the target physical page
908 starting in range [start;end[. NOTE: start and end must refer to
909 the same physical page. 'is_cpu_write_access' should be true if called
910 from a real cpu write access: the virtual CPU will exit the current
911 TB if code is modified inside this TB. */
912 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
913 int is_cpu_write_access)
915 TranslationBlock *tb, *tb_next, *saved_tb;
916 CPUState *env = cpu_single_env;
917 target_ulong tb_start, tb_end;
918 PageDesc *p;
919 int n;
920 #ifdef TARGET_HAS_PRECISE_SMC
921 int current_tb_not_found = is_cpu_write_access;
922 TranslationBlock *current_tb = NULL;
923 int current_tb_modified = 0;
924 target_ulong current_pc = 0;
925 target_ulong current_cs_base = 0;
926 int current_flags = 0;
927 #endif /* TARGET_HAS_PRECISE_SMC */
929 p = page_find(start >> TARGET_PAGE_BITS);
930 if (!p)
931 return;
932 if (!p->code_bitmap &&
933 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
934 is_cpu_write_access) {
935 /* build code bitmap */
936 build_page_bitmap(p);
939 /* we remove all the TBs in the range [start, end[ */
940 /* XXX: see if in some cases it could be faster to invalidate all the code */
941 tb = p->first_tb;
942 while (tb != NULL) {
943 n = (long)tb & 3;
944 tb = (TranslationBlock *)((long)tb & ~3);
945 tb_next = tb->page_next[n];
946 /* NOTE: this is subtle as a TB may span two physical pages */
947 if (n == 0) {
948 /* NOTE: tb_end may be after the end of the page, but
949 it is not a problem */
950 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
951 tb_end = tb_start + tb->size;
952 } else {
953 tb_start = tb->page_addr[1];
954 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
956 if (!(tb_end <= start || tb_start >= end)) {
957 #ifdef TARGET_HAS_PRECISE_SMC
958 if (current_tb_not_found) {
959 current_tb_not_found = 0;
960 current_tb = NULL;
961 if (env->mem_io_pc) {
962 /* now we have a real cpu fault */
963 current_tb = tb_find_pc(env->mem_io_pc);
966 if (current_tb == tb &&
967 (current_tb->cflags & CF_COUNT_MASK) != 1) {
968 /* If we are modifying the current TB, we must stop
969 its execution. We could be more precise by checking
970 that the modification is after the current PC, but it
971 would require a specialized function to partially
972 restore the CPU state */
974 current_tb_modified = 1;
975 cpu_restore_state(current_tb, env,
976 env->mem_io_pc, NULL);
977 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
978 &current_flags);
980 #endif /* TARGET_HAS_PRECISE_SMC */
981 /* we need to do that to handle the case where a signal
982 occurs while doing tb_phys_invalidate() */
983 saved_tb = NULL;
984 if (env) {
985 saved_tb = env->current_tb;
986 env->current_tb = NULL;
988 tb_phys_invalidate(tb, -1);
989 if (env) {
990 env->current_tb = saved_tb;
991 if (env->interrupt_request && env->current_tb)
992 cpu_interrupt(env, env->interrupt_request);
995 tb = tb_next;
997 #if !defined(CONFIG_USER_ONLY)
998 /* if no code remaining, no need to continue to use slow writes */
999 if (!p->first_tb) {
1000 invalidate_page_bitmap(p);
1001 if (is_cpu_write_access) {
1002 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1005 #endif
1006 #ifdef TARGET_HAS_PRECISE_SMC
1007 if (current_tb_modified) {
1008 /* we generate a block containing just the instruction
1009 modifying the memory. It will ensure that it cannot modify
1010 itself */
1011 env->current_tb = NULL;
1012 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1013 cpu_resume_from_signal(env, NULL);
1015 #endif
1018 /* len must be <= 8 and start must be a multiple of len */
1019 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1021 PageDesc *p;
1022 int offset, b;
1023 #if 0
1024 if (1) {
1025 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1026 cpu_single_env->mem_io_vaddr, len,
1027 cpu_single_env->eip,
1028 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1030 #endif
1031 p = page_find(start >> TARGET_PAGE_BITS);
1032 if (!p)
1033 return;
1034 if (p->code_bitmap) {
1035 offset = start & ~TARGET_PAGE_MASK;
1036 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1037 if (b & ((1 << len) - 1))
1038 goto do_invalidate;
1039 } else {
1040 do_invalidate:
1041 tb_invalidate_phys_page_range(start, start + len, 1);
1045 #if !defined(CONFIG_SOFTMMU)
1046 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1047 unsigned long pc, void *puc)
1049 TranslationBlock *tb;
1050 PageDesc *p;
1051 int n;
1052 #ifdef TARGET_HAS_PRECISE_SMC
1053 TranslationBlock *current_tb = NULL;
1054 CPUState *env = cpu_single_env;
1055 int current_tb_modified = 0;
1056 target_ulong current_pc = 0;
1057 target_ulong current_cs_base = 0;
1058 int current_flags = 0;
1059 #endif
1061 addr &= TARGET_PAGE_MASK;
1062 p = page_find(addr >> TARGET_PAGE_BITS);
1063 if (!p)
1064 return;
1065 tb = p->first_tb;
1066 #ifdef TARGET_HAS_PRECISE_SMC
1067 if (tb && pc != 0) {
1068 current_tb = tb_find_pc(pc);
1070 #endif
1071 while (tb != NULL) {
1072 n = (long)tb & 3;
1073 tb = (TranslationBlock *)((long)tb & ~3);
1074 #ifdef TARGET_HAS_PRECISE_SMC
1075 if (current_tb == tb &&
1076 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1077 /* If we are modifying the current TB, we must stop
1078 its execution. We could be more precise by checking
1079 that the modification is after the current PC, but it
1080 would require a specialized function to partially
1081 restore the CPU state */
1083 current_tb_modified = 1;
1084 cpu_restore_state(current_tb, env, pc, puc);
1085 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1086 &current_flags);
1088 #endif /* TARGET_HAS_PRECISE_SMC */
1089 tb_phys_invalidate(tb, addr);
1090 tb = tb->page_next[n];
1092 p->first_tb = NULL;
1093 #ifdef TARGET_HAS_PRECISE_SMC
1094 if (current_tb_modified) {
1095 /* we generate a block containing just the instruction
1096 modifying the memory. It will ensure that it cannot modify
1097 itself */
1098 env->current_tb = NULL;
1099 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1100 cpu_resume_from_signal(env, puc);
1102 #endif
1104 #endif
1106 /* add the tb in the target page and protect it if necessary */
1107 static inline void tb_alloc_page(TranslationBlock *tb,
1108 unsigned int n, target_ulong page_addr)
1110 PageDesc *p;
1111 TranslationBlock *last_first_tb;
1113 tb->page_addr[n] = page_addr;
1114 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1115 tb->page_next[n] = p->first_tb;
1116 last_first_tb = p->first_tb;
1117 p->first_tb = (TranslationBlock *)((long)tb | n);
1118 invalidate_page_bitmap(p);
1120 #if defined(TARGET_HAS_SMC) || 1
1122 #if defined(CONFIG_USER_ONLY)
1123 if (p->flags & PAGE_WRITE) {
1124 target_ulong addr;
1125 PageDesc *p2;
1126 int prot;
1128 /* force the host page as non writable (writes will have a
1129 page fault + mprotect overhead) */
1130 page_addr &= qemu_host_page_mask;
1131 prot = 0;
1132 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1133 addr += TARGET_PAGE_SIZE) {
1135 p2 = page_find (addr >> TARGET_PAGE_BITS);
1136 if (!p2)
1137 continue;
1138 prot |= p2->flags;
1139 p2->flags &= ~PAGE_WRITE;
1140 page_get_flags(addr);
1142 mprotect(g2h(page_addr), qemu_host_page_size,
1143 (prot & PAGE_BITS) & ~PAGE_WRITE);
1144 #ifdef DEBUG_TB_INVALIDATE
1145 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1146 page_addr);
1147 #endif
1149 #else
1150 /* if some code is already present, then the pages are already
1151 protected. So we handle the case where only the first TB is
1152 allocated in a physical page */
1153 if (!last_first_tb) {
1154 tlb_protect_code(page_addr);
1156 #endif
1158 #endif /* TARGET_HAS_SMC */
1161 /* Allocate a new translation block. Flush the translation buffer if
1162 too many translation blocks or too much generated code. */
1163 TranslationBlock *tb_alloc(target_ulong pc)
1165 TranslationBlock *tb;
1167 if (nb_tbs >= code_gen_max_blocks ||
1168 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1169 return NULL;
1170 tb = &tbs[nb_tbs++];
1171 tb->pc = pc;
1172 tb->cflags = 0;
1173 return tb;
1176 void tb_free(TranslationBlock *tb)
1178 /* In practice this is mostly used for single-use temporary TBs.
1179 Ignore the hard cases and just back up if this TB happens to
1180 be the last one generated. */
1181 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1182 code_gen_ptr = tb->tc_ptr;
1183 nb_tbs--;
1187 /* add a new TB and link it to the physical page tables. phys_page2 is
1188 (-1) to indicate that only one page contains the TB. */
1189 void tb_link_phys(TranslationBlock *tb,
1190 target_ulong phys_pc, target_ulong phys_page2)
1192 unsigned int h;
1193 TranslationBlock **ptb;
1195 /* Grab the mmap lock to stop another thread invalidating this TB
1196 before we are done. */
1197 mmap_lock();
1198 /* add in the physical hash table */
1199 h = tb_phys_hash_func(phys_pc);
1200 ptb = &tb_phys_hash[h];
1201 tb->phys_hash_next = *ptb;
1202 *ptb = tb;
1204 /* add in the page list */
1205 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1206 if (phys_page2 != -1)
1207 tb_alloc_page(tb, 1, phys_page2);
1208 else
1209 tb->page_addr[1] = -1;
1211 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1212 tb->jmp_next[0] = NULL;
1213 tb->jmp_next[1] = NULL;
1215 /* init original jump addresses */
1216 if (tb->tb_next_offset[0] != 0xffff)
1217 tb_reset_jump(tb, 0);
1218 if (tb->tb_next_offset[1] != 0xffff)
1219 tb_reset_jump(tb, 1);
1221 #ifdef DEBUG_TB_CHECK
1222 tb_page_check();
1223 #endif
1224 mmap_unlock();
1227 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1228 tb[1].tc_ptr. Return NULL if not found */
1229 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1231 int m_min, m_max, m;
1232 unsigned long v;
1233 TranslationBlock *tb;
1235 if (nb_tbs <= 0)
1236 return NULL;
1237 if (tc_ptr < (unsigned long)code_gen_buffer ||
1238 tc_ptr >= (unsigned long)code_gen_ptr)
1239 return NULL;
1240 /* binary search (cf Knuth) */
1241 m_min = 0;
1242 m_max = nb_tbs - 1;
1243 while (m_min <= m_max) {
1244 m = (m_min + m_max) >> 1;
1245 tb = &tbs[m];
1246 v = (unsigned long)tb->tc_ptr;
1247 if (v == tc_ptr)
1248 return tb;
1249 else if (tc_ptr < v) {
1250 m_max = m - 1;
1251 } else {
1252 m_min = m + 1;
1255 return &tbs[m_max];
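/* Because TBs are carved out of code_gen_buffer in allocation order, tbs[]
is sorted by tc_ptr, so the binary search above ends on the last TB whose
host code starts at or before tc_ptr, i.e. the block containing it. */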
1258 static void tb_reset_jump_recursive(TranslationBlock *tb);
1260 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1262 TranslationBlock *tb1, *tb_next, **ptb;
1263 unsigned int n1;
1265 tb1 = tb->jmp_next[n];
1266 if (tb1 != NULL) {
1267 /* find head of list */
1268 for(;;) {
1269 n1 = (long)tb1 & 3;
1270 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1271 if (n1 == 2)
1272 break;
1273 tb1 = tb1->jmp_next[n1];
1275 /* we are now sure that tb jumps to tb1 */
1276 tb_next = tb1;
1278 /* remove tb from the jmp_first list */
1279 ptb = &tb_next->jmp_first;
1280 for(;;) {
1281 tb1 = *ptb;
1282 n1 = (long)tb1 & 3;
1283 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1284 if (n1 == n && tb1 == tb)
1285 break;
1286 ptb = &tb1->jmp_next[n1];
1288 *ptb = tb->jmp_next[n];
1289 tb->jmp_next[n] = NULL;
1291 /* suppress the jump to next tb in generated code */
1292 tb_reset_jump(tb, n);
1294 /* suppress jumps in the tb on which we could have jumped */
1295 tb_reset_jump_recursive(tb_next);
1299 static void tb_reset_jump_recursive(TranslationBlock *tb)
1301 tb_reset_jump_recursive2(tb, 0);
1302 tb_reset_jump_recursive2(tb, 1);
1305 #if defined(TARGET_HAS_ICE)
1306 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1308 target_phys_addr_t addr;
1309 target_ulong pd;
1310 ram_addr_t ram_addr;
1311 PhysPageDesc *p;
1313 addr = cpu_get_phys_page_debug(env, pc);
1314 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1315 if (!p) {
1316 pd = IO_MEM_UNASSIGNED;
1317 } else {
1318 pd = p->phys_offset;
1320 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1321 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1323 #endif
1325 /* Add a watchpoint. */
1326 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1327 int flags, CPUWatchpoint **watchpoint)
1329 target_ulong len_mask = ~(len - 1);
1330 CPUWatchpoint *wp;
1332 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1333 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1334 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1335 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1336 return -EINVAL;
1338 wp = qemu_malloc(sizeof(*wp));
1340 wp->vaddr = addr;
1341 wp->len_mask = len_mask;
1342 wp->flags = flags;
1344 /* keep all GDB-injected watchpoints in front */
1345 if (flags & BP_GDB)
1346 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1347 else
1348 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1350 tlb_flush_page(env, addr);
1352 if (watchpoint)
1353 *watchpoint = wp;
1354 return 0;
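/* For example, cpu_watchpoint_insert(env, addr, 4, BP_GDB, &wp) registers a
4-byte GDB watchpoint; len must be a power of two (1, 2, 4 or 8) and addr
aligned to it, otherwise -EINVAL is returned. */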
1357 /* Remove a specific watchpoint. */
1358 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1359 int flags)
1361 target_ulong len_mask = ~(len - 1);
1362 CPUWatchpoint *wp;
1364 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1365 if (addr == wp->vaddr && len_mask == wp->len_mask
1366 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1367 cpu_watchpoint_remove_by_ref(env, wp);
1368 return 0;
1371 return -ENOENT;
1374 /* Remove a specific watchpoint by reference. */
1375 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1377 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1379 tlb_flush_page(env, watchpoint->vaddr);
1381 qemu_free(watchpoint);
1384 /* Remove all matching watchpoints. */
1385 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1387 CPUWatchpoint *wp, *next;
1389 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1390 if (wp->flags & mask)
1391 cpu_watchpoint_remove_by_ref(env, wp);
1395 /* Add a breakpoint. */
1396 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1397 CPUBreakpoint **breakpoint)
1399 #if defined(TARGET_HAS_ICE)
1400 CPUBreakpoint *bp;
1402 bp = qemu_malloc(sizeof(*bp));
1404 bp->pc = pc;
1405 bp->flags = flags;
1407 /* keep all GDB-injected breakpoints in front */
1408 if (flags & BP_GDB)
1409 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1410 else
1411 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1413 breakpoint_invalidate(env, pc);
1415 if (breakpoint)
1416 *breakpoint = bp;
1417 return 0;
1418 #else
1419 return -ENOSYS;
1420 #endif
1423 /* Remove a specific breakpoint. */
1424 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1426 #if defined(TARGET_HAS_ICE)
1427 CPUBreakpoint *bp;
1429 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1430 if (bp->pc == pc && bp->flags == flags) {
1431 cpu_breakpoint_remove_by_ref(env, bp);
1432 return 0;
1435 return -ENOENT;
1436 #else
1437 return -ENOSYS;
1438 #endif
1441 /* Remove a specific breakpoint by reference. */
1442 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1444 #if defined(TARGET_HAS_ICE)
1445 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1447 breakpoint_invalidate(env, breakpoint->pc);
1449 qemu_free(breakpoint);
1450 #endif
1453 /* Remove all matching breakpoints. */
1454 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1456 #if defined(TARGET_HAS_ICE)
1457 CPUBreakpoint *bp, *next;
1459 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1460 if (bp->flags & mask)
1461 cpu_breakpoint_remove_by_ref(env, bp);
1463 #endif
1466 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1467 CPU loop after each instruction */
1468 void cpu_single_step(CPUState *env, int enabled)
1470 #if defined(TARGET_HAS_ICE)
1471 if (env->singlestep_enabled != enabled) {
1472 env->singlestep_enabled = enabled;
1473 if (kvm_enabled())
1474 kvm_update_guest_debug(env, 0);
1475 else {
1476 /* must flush all the translated code to avoid inconsistencies */
1477 /* XXX: only flush what is necessary */
1478 tb_flush(env);
1481 #endif
1484 /* enable or disable low levels log */
1485 void cpu_set_log(int log_flags)
1487 loglevel = log_flags;
1488 if (loglevel && !logfile) {
1489 logfile = fopen(logfilename, log_append ? "a" : "w");
1490 if (!logfile) {
1491 perror(logfilename);
1492 _exit(1);
1494 #if !defined(CONFIG_SOFTMMU)
1495 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1497 static char logfile_buf[4096];
1498 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1500 #else
1501 setvbuf(logfile, NULL, _IOLBF, 0);
1502 #endif
1503 log_append = 1;
1505 if (!loglevel && logfile) {
1506 fclose(logfile);
1507 logfile = NULL;
1511 void cpu_set_log_filename(const char *filename)
1513 logfilename = strdup(filename);
1514 if (logfile) {
1515 fclose(logfile);
1516 logfile = NULL;
1518 cpu_set_log(loglevel);
1521 static void cpu_unlink_tb(CPUState *env)
1523 #if defined(USE_NPTL)
1524 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1525 problem and hope the cpu will stop of its own accord. For userspace
1526 emulation this often isn't actually as bad as it sounds. Often
1527 signals are used primarily to interrupt blocking syscalls. */
1528 #else
1529 TranslationBlock *tb;
1530 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1532 tb = env->current_tb;
1533 /* if the cpu is currently executing code, we must unlink it and
1534 all the potentially executing TB */
1535 if (tb && !testandset(&interrupt_lock)) {
1536 env->current_tb = NULL;
1537 tb_reset_jump_recursive(tb);
1538 resetlock(&interrupt_lock);
1540 #endif
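/* cpu_interrupt() records the request in env->interrupt_request and then
kicks the CPU out of generated code: in icount mode by exhausting the
instruction budget (icount_decr.u16.high = 0xffff), otherwise by
unchaining the currently executing TB via cpu_unlink_tb(). */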
1543 /* mask must never be zero, except for A20 change call */
1544 void cpu_interrupt(CPUState *env, int mask)
1546 int old_mask;
1548 old_mask = env->interrupt_request;
1549 env->interrupt_request |= mask;
1550 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1551 kvm_update_interrupt_request(env);
1553 if (use_icount) {
1554 env->icount_decr.u16.high = 0xffff;
1555 #ifndef CONFIG_USER_ONLY
1556 if (!can_do_io(env)
1557 && (mask & ~old_mask) != 0) {
1558 cpu_abort(env, "Raised interrupt while not in I/O function");
1560 #endif
1561 } else {
1562 cpu_unlink_tb(env);
1566 void cpu_reset_interrupt(CPUState *env, int mask)
1568 env->interrupt_request &= ~mask;
1571 void cpu_exit(CPUState *env)
1573 env->exit_request = 1;
1574 cpu_unlink_tb(env);
1577 const CPULogItem cpu_log_items[] = {
1578 { CPU_LOG_TB_OUT_ASM, "out_asm",
1579 "show generated host assembly code for each compiled TB" },
1580 { CPU_LOG_TB_IN_ASM, "in_asm",
1581 "show target assembly code for each compiled TB" },
1582 { CPU_LOG_TB_OP, "op",
1583 "show micro ops for each compiled TB" },
1584 { CPU_LOG_TB_OP_OPT, "op_opt",
1585 "show micro ops "
1586 #ifdef TARGET_I386
1587 "before eflags optimization and "
1588 #endif
1589 "after liveness analysis" },
1590 { CPU_LOG_INT, "int",
1591 "show interrupts/exceptions in short format" },
1592 { CPU_LOG_EXEC, "exec",
1593 "show trace before each executed TB (lots of logs)" },
1594 { CPU_LOG_TB_CPU, "cpu",
1595 "show CPU state before block translation" },
1596 #ifdef TARGET_I386
1597 { CPU_LOG_PCALL, "pcall",
1598 "show protected mode far calls/returns/exceptions" },
1599 { CPU_LOG_RESET, "cpu_reset",
1600 "show CPU state before CPU resets" },
1601 #endif
1602 #ifdef DEBUG_IOPORT
1603 { CPU_LOG_IOPORT, "ioport",
1604 "show all i/o ports accesses" },
1605 #endif
1606 { 0, NULL, NULL },
1609 static int cmp1(const char *s1, int n, const char *s2)
1611 if (strlen(s2) != n)
1612 return 0;
1613 return memcmp(s1, s2, n) == 0;
1616 /* takes a comma-separated list of log masks. Returns 0 on error. */
1617 int cpu_str_to_log_mask(const char *str)
1619 const CPULogItem *item;
1620 int mask;
1621 const char *p, *p1;
1623 p = str;
1624 mask = 0;
1625 for(;;) {
1626 p1 = strchr(p, ',');
1627 if (!p1)
1628 p1 = p + strlen(p);
1629 if(cmp1(p,p1-p,"all")) {
1630 for(item = cpu_log_items; item->mask != 0; item++) {
1631 mask |= item->mask;
1633 } else {
1634 for(item = cpu_log_items; item->mask != 0; item++) {
1635 if (cmp1(p, p1 - p, item->name))
1636 goto found;
1638 return 0;
1640 found:
1641 mask |= item->mask;
1642 if (*p1 != ',')
1643 break;
1644 p = p1 + 1;
1646 return mask;
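/* For example, "in_asm,op" yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all"
selects every entry of cpu_log_items, and any unrecognized name makes the
whole call return 0. */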
1649 void cpu_abort(CPUState *env, const char *fmt, ...)
1651 va_list ap;
1652 va_list ap2;
1654 va_start(ap, fmt);
1655 va_copy(ap2, ap);
1656 fprintf(stderr, "qemu: fatal: ");
1657 vfprintf(stderr, fmt, ap);
1658 fprintf(stderr, "\n");
1659 #ifdef TARGET_I386
1660 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1661 #else
1662 cpu_dump_state(env, stderr, fprintf, 0);
1663 #endif
1664 if (qemu_log_enabled()) {
1665 qemu_log("qemu: fatal: ");
1666 qemu_log_vprintf(fmt, ap2);
1667 qemu_log("\n");
1668 #ifdef TARGET_I386
1669 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1670 #else
1671 log_cpu_state(env, 0);
1672 #endif
1673 qemu_log_flush();
1674 qemu_log_close();
1676 va_end(ap2);
1677 va_end(ap);
1678 abort();
1681 CPUState *cpu_copy(CPUState *env)
1683 CPUState *new_env = cpu_init(env->cpu_model_str);
1684 CPUState *next_cpu = new_env->next_cpu;
1685 int cpu_index = new_env->cpu_index;
1686 #if defined(TARGET_HAS_ICE)
1687 CPUBreakpoint *bp;
1688 CPUWatchpoint *wp;
1689 #endif
1691 memcpy(new_env, env, sizeof(CPUState));
1693 /* Preserve chaining and index. */
1694 new_env->next_cpu = next_cpu;
1695 new_env->cpu_index = cpu_index;
1697 /* Clone all break/watchpoints.
1698 Note: Once we support ptrace with hw-debug register access, make sure
1699 BP_CPU break/watchpoints are handled correctly on clone. */
1700 TAILQ_INIT(&env->breakpoints);
1701 TAILQ_INIT(&env->watchpoints);
1702 #if defined(TARGET_HAS_ICE)
1703 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1704 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1706 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1707 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1708 wp->flags, NULL);
1710 #endif
1712 return new_env;
1715 #if !defined(CONFIG_USER_ONLY)
1717 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1719 unsigned int i;
1721 /* Discard jump cache entries for any tb which might potentially
1722 overlap the flushed page. */
1723 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1724 memset (&env->tb_jmp_cache[i], 0,
1725 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1727 i = tb_jmp_cache_hash_page(addr);
1728 memset (&env->tb_jmp_cache[i], 0,
1729 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1732 /* NOTE: if flush_global is true, also flush global entries (not
1733 implemented yet) */
1734 void tlb_flush(CPUState *env, int flush_global)
1736 int i;
1738 #if defined(DEBUG_TLB)
1739 printf("tlb_flush:\n");
1740 #endif
1741 /* must reset current TB so that interrupts cannot modify the
1742 links while we are modifying them */
1743 env->current_tb = NULL;
1745 for(i = 0; i < CPU_TLB_SIZE; i++) {
1746 env->tlb_table[0][i].addr_read = -1;
1747 env->tlb_table[0][i].addr_write = -1;
1748 env->tlb_table[0][i].addr_code = -1;
1749 env->tlb_table[1][i].addr_read = -1;
1750 env->tlb_table[1][i].addr_write = -1;
1751 env->tlb_table[1][i].addr_code = -1;
1752 #if (NB_MMU_MODES >= 3)
1753 env->tlb_table[2][i].addr_read = -1;
1754 env->tlb_table[2][i].addr_write = -1;
1755 env->tlb_table[2][i].addr_code = -1;
1756 #if (NB_MMU_MODES == 4)
1757 env->tlb_table[3][i].addr_read = -1;
1758 env->tlb_table[3][i].addr_write = -1;
1759 env->tlb_table[3][i].addr_code = -1;
1760 #endif
1761 #endif
1764 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1766 #ifdef USE_KQEMU
1767 if (env->kqemu_enabled) {
1768 kqemu_flush(env, flush_global);
1770 #endif
1771 tlb_flush_count++;
1774 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1776 if (addr == (tlb_entry->addr_read &
1777 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1778 addr == (tlb_entry->addr_write &
1779 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1780 addr == (tlb_entry->addr_code &
1781 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1782 tlb_entry->addr_read = -1;
1783 tlb_entry->addr_write = -1;
1784 tlb_entry->addr_code = -1;
1788 void tlb_flush_page(CPUState *env, target_ulong addr)
1790 int i;
1792 #if defined(DEBUG_TLB)
1793 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1794 #endif
1795 /* must reset current TB so that interrupts cannot modify the
1796 links while we are modifying them */
1797 env->current_tb = NULL;
1799 addr &= TARGET_PAGE_MASK;
1800 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1801 tlb_flush_entry(&env->tlb_table[0][i], addr);
1802 tlb_flush_entry(&env->tlb_table[1][i], addr);
1803 #if (NB_MMU_MODES >= 3)
1804 tlb_flush_entry(&env->tlb_table[2][i], addr);
1805 #if (NB_MMU_MODES == 4)
1806 tlb_flush_entry(&env->tlb_table[3][i], addr);
1807 #endif
1808 #endif
1810 tlb_flush_jmp_cache(env, addr);
1812 #ifdef USE_KQEMU
1813 if (env->kqemu_enabled) {
1814 kqemu_flush_page(env, addr);
1816 #endif
1819 /* update the TLBs so that writes to code in the virtual page 'addr'
1820 can be detected */
1821 static void tlb_protect_code(ram_addr_t ram_addr)
1823 cpu_physical_memory_reset_dirty(ram_addr,
1824 ram_addr + TARGET_PAGE_SIZE,
1825 CODE_DIRTY_FLAG);
1828 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1829 tested for self modifying code */
1830 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1831 target_ulong vaddr)
1833 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
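/* phys_ram_dirty holds one byte of dirty flags per target page. Clearing
CODE_DIRTY_FLAG (tlb_protect_code) forces writes to the page through the
slow path so self-modifying code is detected; setting it again
(tlb_unprotect_code_phys) restores direct writes once no translated code
remains in the page. */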
1836 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1837 unsigned long start, unsigned long length)
1839 unsigned long addr;
1840 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1841 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1842 if ((addr - start) < length) {
1843 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1848 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1849 int dirty_flags)
1851 CPUState *env;
1852 unsigned long length, start1;
1853 int i, mask, len;
1854 uint8_t *p;
1856 start &= TARGET_PAGE_MASK;
1857 end = TARGET_PAGE_ALIGN(end);
1859 length = end - start;
1860 if (length == 0)
1861 return;
1862 len = length >> TARGET_PAGE_BITS;
1863 #ifdef USE_KQEMU
1864 /* XXX: should not depend on cpu context */
1865 env = first_cpu;
1866 if (env->kqemu_enabled) {
1867 ram_addr_t addr;
1868 addr = start;
1869 for(i = 0; i < len; i++) {
1870 kqemu_set_notdirty(env, addr);
1871 addr += TARGET_PAGE_SIZE;
1874 #endif
1875 mask = ~dirty_flags;
1876 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1877 for(i = 0; i < len; i++)
1878 p[i] &= mask;
1880 /* we modify the TLB cache so that the dirty bit will be set again
1881 when accessing the range */
1882 start1 = start + (unsigned long)phys_ram_base;
1883 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1884 for(i = 0; i < CPU_TLB_SIZE; i++)
1885 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1886 for(i = 0; i < CPU_TLB_SIZE; i++)
1887 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1888 #if (NB_MMU_MODES >= 3)
1889 for(i = 0; i < CPU_TLB_SIZE; i++)
1890 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1891 #if (NB_MMU_MODES == 4)
1892 for(i = 0; i < CPU_TLB_SIZE; i++)
1893 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1894 #endif
1895 #endif
1899 int cpu_physical_memory_set_dirty_tracking(int enable)
1901 int r=0;
1903 if (kvm_enabled())
1904 r = kvm_physical_memory_set_dirty_tracking(enable);
1905 in_migration = enable;
1906 return r;
1909 int cpu_physical_memory_get_dirty_tracking(void)
1911 return in_migration;
1914 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1916 if (kvm_enabled())
1917 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1920 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1922 ram_addr_t ram_addr;
1924 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1925 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1926 tlb_entry->addend - (unsigned long)phys_ram_base;
1927 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1928 tlb_entry->addr_write |= TLB_NOTDIRTY;
1933 /* update the TLB according to the current state of the dirty bits */
1934 void cpu_tlb_update_dirty(CPUState *env)
1936 int i;
1937 for(i = 0; i < CPU_TLB_SIZE; i++)
1938 tlb_update_dirty(&env->tlb_table[0][i]);
1939 for(i = 0; i < CPU_TLB_SIZE; i++)
1940 tlb_update_dirty(&env->tlb_table[1][i]);
1941 #if (NB_MMU_MODES >= 3)
1942 for(i = 0; i < CPU_TLB_SIZE; i++)
1943 tlb_update_dirty(&env->tlb_table[2][i]);
1944 #if (NB_MMU_MODES == 4)
1945 for(i = 0; i < CPU_TLB_SIZE; i++)
1946 tlb_update_dirty(&env->tlb_table[3][i]);
1947 #endif
1948 #endif
1951 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1953 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1954 tlb_entry->addr_write = vaddr;
1957 /* update the TLB corresponding to virtual page vaddr
1958 so that it is no longer dirty */
1959 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1961 int i;
1963 vaddr &= TARGET_PAGE_MASK;
1964 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1965 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1966 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1967 #if (NB_MMU_MODES >= 3)
1968 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1969 #if (NB_MMU_MODES == 4)
1970 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1971 #endif
1972 #endif
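/* In the softmmu TLB filled in below, te->addend turns a guest virtual
address straight into a host pointer for ordinary RAM, while the iotlb
entry records either the page's ram address (tagged IO_MEM_NOTDIRTY or
IO_MEM_ROM) or the I/O handler index plus region offset; the TLB_MMIO and
TLB_NOTDIRTY bits in the address fields push those accesses onto the slow
path. */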
1975 /* add a new TLB entry. At most one entry for a given virtual address
1976 is permitted. Return 0 if OK or 2 if the page could not be mapped
1977 (can only happen in non SOFTMMU mode for I/O pages or pages
1978 conflicting with the host address space). */
1979 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1980 target_phys_addr_t paddr, int prot,
1981 int mmu_idx, int is_softmmu)
1983 PhysPageDesc *p;
1984 unsigned long pd;
1985 unsigned int index;
1986 target_ulong address;
1987 target_ulong code_address;
1988 target_phys_addr_t addend;
1989 int ret;
1990 CPUTLBEntry *te;
1991 CPUWatchpoint *wp;
1992 target_phys_addr_t iotlb;
1994 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1995 if (!p) {
1996 pd = IO_MEM_UNASSIGNED;
1997 } else {
1998 pd = p->phys_offset;
2000 #if defined(DEBUG_TLB)
2001 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2002 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2003 #endif
2005 ret = 0;
2006 address = vaddr;
2007 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2008 /* IO memory case (romd handled later) */
2009 address |= TLB_MMIO;
2011 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2012 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2013 /* Normal RAM. */
2014 iotlb = pd & TARGET_PAGE_MASK;
2015 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2016 iotlb |= IO_MEM_NOTDIRTY;
2017 else
2018 iotlb |= IO_MEM_ROM;
2019 } else {
2020 /* IO handlers are currently passed a physical address.
2021 It would be nice to pass an offset from the base address
2022 of that region. This would avoid having to special case RAM,
2023 and avoid full address decoding in every device.
2024 We can't use the high bits of pd for this because
2025 IO_MEM_ROMD uses these as a ram address. */
2026 iotlb = (pd & ~TARGET_PAGE_MASK);
2027 if (p) {
2028 iotlb += p->region_offset;
2029 } else {
2030 iotlb += paddr;
2034 code_address = address;
2035 /* Make accesses to pages with watchpoints go via the
2036 watchpoint trap routines. */
2037 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2038 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2039 iotlb = io_mem_watch + paddr;
2040 /* TODO: The memory case can be optimized by not trapping
2041 reads of pages with a write breakpoint. */
2042 address |= TLB_MMIO;
2046 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2047 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2048 te = &env->tlb_table[mmu_idx][index];
2049 te->addend = addend - vaddr;
2050 if (prot & PAGE_READ) {
2051 te->addr_read = address;
2052 } else {
2053 te->addr_read = -1;
2056 if (prot & PAGE_EXEC) {
2057 te->addr_code = code_address;
2058 } else {
2059 te->addr_code = -1;
2061 if (prot & PAGE_WRITE) {
2062 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2063 (pd & IO_MEM_ROMD)) {
2064 /* Write access calls the I/O callback. */
2065 te->addr_write = address | TLB_MMIO;
2066 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2067 !cpu_physical_memory_is_dirty(pd)) {
2068 te->addr_write = address | TLB_NOTDIRTY;
2069 } else {
2070 te->addr_write = address;
2072 } else {
2073 te->addr_write = -1;
2075 return ret;
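/* Sketch of why the low bits matter (shape only, not the generated code;
   data_size is just an illustrative name for the access size): TLB_MMIO and
   TLB_NOTDIRTY are or-ed into the address fields above so that the
   page-aligned compare done by the inline softmmu fast path fails and the
   access falls back to the out-of-line helpers (I/O dispatch, dirty
   tracking).  Roughly: */
#if 0
    if ((addr & (TARGET_PAGE_MASK | (data_size - 1))) == te->addr_write) {
        /* plain RAM: access host memory at addr + te->addend */
    } else {
        /* any set low bit (TLB_MMIO, TLB_NOTDIRTY) or misalignment goes
           through the softmmu slow path */
    }
#endif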
2078 #else
2080 void tlb_flush(CPUState *env, int flush_global)
2084 void tlb_flush_page(CPUState *env, target_ulong addr)
2088 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2089 target_phys_addr_t paddr, int prot,
2090 int mmu_idx, int is_softmmu)
2092 return 0;
2095 /* dump memory mappings */
2096 void page_dump(FILE *f)
2098 unsigned long start, end;
2099 int i, j, prot, prot1;
2100 PageDesc *p;
2102 fprintf(f, "%-8s %-8s %-8s %s\n",
2103 "start", "end", "size", "prot");
2104 start = -1;
2105 end = -1;
2106 prot = 0;
2107 for(i = 0; i <= L1_SIZE; i++) {
2108 if (i < L1_SIZE)
2109 p = l1_map[i];
2110 else
2111 p = NULL;
2112 for(j = 0;j < L2_SIZE; j++) {
2113 if (!p)
2114 prot1 = 0;
2115 else
2116 prot1 = p[j].flags;
2117 if (prot1 != prot) {
2118 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2119 if (start != -1) {
2120 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2121 start, end, end - start,
2122 prot & PAGE_READ ? 'r' : '-',
2123 prot & PAGE_WRITE ? 'w' : '-',
2124 prot & PAGE_EXEC ? 'x' : '-');
2126 if (prot1 != 0)
2127 start = end;
2128 else
2129 start = -1;
2130 prot = prot1;
2132 if (!p)
2133 break;
2138 int page_get_flags(target_ulong address)
2140 PageDesc *p;
2142 p = page_find(address >> TARGET_PAGE_BITS);
2143 if (!p)
2144 return 0;
2145 return p->flags;
2148 /* modify the flags of a page and invalidate the code if
2149 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2150 depending on PAGE_WRITE */
2151 void page_set_flags(target_ulong start, target_ulong end, int flags)
2153 PageDesc *p;
2154 target_ulong addr;
2156 /* mmap_lock should already be held. */
2157 start = start & TARGET_PAGE_MASK;
2158 end = TARGET_PAGE_ALIGN(end);
2159 if (flags & PAGE_WRITE)
2160 flags |= PAGE_WRITE_ORG;
2161 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2162 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2163 /* We may be called for host regions that are outside guest
2164 address space. */
2165 if (!p)
2166 return;
2167 /* if the write protection is set, then we invalidate the code
2168 inside */
2169 if (!(p->flags & PAGE_WRITE) &&
2170 (flags & PAGE_WRITE) &&
2171 p->first_tb) {
2172 tb_invalidate_phys_page(addr, 0, NULL);
2174 p->flags = flags;
2178 int page_check_range(target_ulong start, target_ulong len, int flags)
2180 PageDesc *p;
2181 target_ulong end;
2182 target_ulong addr;
2184 if (start + len < start)
2185 /* we've wrapped around */
2186 return -1;
2188 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2189 start = start & TARGET_PAGE_MASK;
2191 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2192 p = page_find(addr >> TARGET_PAGE_BITS);
2193 if( !p )
2194 return -1;
2195 if( !(p->flags & PAGE_VALID) )
2196 return -1;
2198 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2199 return -1;
2200 if (flags & PAGE_WRITE) {
2201 if (!(p->flags & PAGE_WRITE_ORG))
2202 return -1;
2203 /* unprotect the page if it was put read-only because it
2204 contains translated code */
2205 if (!(p->flags & PAGE_WRITE)) {
2206 if (!page_unprotect(addr, 0, NULL))
2207 return -1;
2209 return 0;
2212 return 0;
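/* Usage sketch (hypothetical caller in user-mode emulation; guest_addr and
   guest_len are made-up names): reject a guest buffer before touching it. */
#if 0
    if (page_check_range(guest_addr, guest_len, PAGE_READ | PAGE_WRITE) < 0)
        return -1;   /* e.g. report the target's equivalent of EFAULT */
#endif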
2215 /* called from signal handler: invalidate the code and unprotect the
2216 page. Return TRUE if the fault was successfully handled. */
2217 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2219 unsigned int page_index, prot, pindex;
2220 PageDesc *p, *p1;
2221 target_ulong host_start, host_end, addr;
2223 /* Technically this isn't safe inside a signal handler. However, we
2224 know this only ever happens in a synchronous SEGV handler, so in
2225 practice it seems to be ok. */
2226 mmap_lock();
2228 host_start = address & qemu_host_page_mask;
2229 page_index = host_start >> TARGET_PAGE_BITS;
2230 p1 = page_find(page_index);
2231 if (!p1) {
2232 mmap_unlock();
2233 return 0;
2235 host_end = host_start + qemu_host_page_size;
2236 p = p1;
2237 prot = 0;
2238 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2239 prot |= p->flags;
2240 p++;
2242 /* if the page was really writable, then we change its
2243 protection back to writable */
2244 if (prot & PAGE_WRITE_ORG) {
2245 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2246 if (!(p1[pindex].flags & PAGE_WRITE)) {
2247 mprotect((void *)g2h(host_start), qemu_host_page_size,
2248 (prot & PAGE_BITS) | PAGE_WRITE);
2249 p1[pindex].flags |= PAGE_WRITE;
2250 /* and since the content will be modified, we must invalidate
2251 the corresponding translated code. */
2252 tb_invalidate_phys_page(address, pc, puc);
2253 #ifdef DEBUG_TB_CHECK
2254 tb_invalidate_check(address);
2255 #endif
2256 mmap_unlock();
2257 return 1;
2260 mmap_unlock();
2261 return 0;
2264 static inline void tlb_set_dirty(CPUState *env,
2265 unsigned long addr, target_ulong vaddr)
2268 #endif /* defined(CONFIG_USER_ONLY) */
2270 #if !defined(CONFIG_USER_ONLY)
2272 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2273 ram_addr_t memory, ram_addr_t region_offset);
2274 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2275 ram_addr_t orig_memory, ram_addr_t region_offset);
2276 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2277 need_subpage) \
2278 do { \
2279 if (addr > start_addr) \
2280 start_addr2 = 0; \
2281 else { \
2282 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2283 if (start_addr2 > 0) \
2284 need_subpage = 1; \
2287 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2288 end_addr2 = TARGET_PAGE_SIZE - 1; \
2289 else { \
2290 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2291 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2292 need_subpage = 1; \
2294 } while (0)
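/* Worked example (illustrative, assuming 4 KiB target pages): registering a
   1 KiB region at start_addr = 0x10000200 gives, for the first page visited
   (addr == start_addr), start_addr2 == 0x200 and end_addr2 == 0x5ff, both
   strictly inside the page, so need_subpage is set and only offsets
   0x200..0x5ff of that page are routed to the new handler. */
#if 0
    /* hypothetical values matching the case above; note that orig_size is
       picked up from the enclosing scope, as in
       cpu_register_physical_memory_offset() below */
    ram_addr_t orig_size = 0x400;
    target_phys_addr_t start_addr = 0x10000200, addr = start_addr;
    target_phys_addr_t end_addr = start_addr + orig_size;
    target_phys_addr_t start_addr2, end_addr2;
    int need_subpage = 0;
    CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                  need_subpage);
    /* -> start_addr2 == 0x200, end_addr2 == 0x5ff, need_subpage == 1 */
#endif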
2296 /* register physical memory. 'size' must be a multiple of the target
2297 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2298 io memory page. The address used when calling the IO function is
2299 the offset from the start of the region, plus region_offset. Both
2300 start_addr and region_offset are rounded down to a page boundary
2301 before calculating this offset. This should not be a problem unless
2302 the low bits of start_addr and region_offset differ. */
2303 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2304 ram_addr_t size,
2305 ram_addr_t phys_offset,
2306 ram_addr_t region_offset)
2308 target_phys_addr_t addr, end_addr;
2309 PhysPageDesc *p;
2310 CPUState *env;
2311 ram_addr_t orig_size = size;
2312 void *subpage;
2314 #ifdef USE_KQEMU
2315 /* XXX: should not depend on cpu context */
2316 env = first_cpu;
2317 if (env->kqemu_enabled) {
2318 kqemu_set_phys_mem(start_addr, size, phys_offset);
2320 #endif
2321 if (kvm_enabled())
2322 kvm_set_phys_mem(start_addr, size, phys_offset);
2324 if (phys_offset == IO_MEM_UNASSIGNED) {
2325 region_offset = start_addr;
2327 region_offset &= TARGET_PAGE_MASK;
2328 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2329 end_addr = start_addr + (target_phys_addr_t)size;
2330 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2331 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2332 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2333 ram_addr_t orig_memory = p->phys_offset;
2334 target_phys_addr_t start_addr2, end_addr2;
2335 int need_subpage = 0;
2337 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2338 need_subpage);
2339 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2340 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2341 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2342 &p->phys_offset, orig_memory,
2343 p->region_offset);
2344 } else {
2345 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2346 >> IO_MEM_SHIFT];
2348 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2349 region_offset);
2350 p->region_offset = 0;
2351 } else {
2352 p->phys_offset = phys_offset;
2353 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2354 (phys_offset & IO_MEM_ROMD))
2355 phys_offset += TARGET_PAGE_SIZE;
2357 } else {
2358 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2359 p->phys_offset = phys_offset;
2360 p->region_offset = region_offset;
2361 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2362 (phys_offset & IO_MEM_ROMD)) {
2363 phys_offset += TARGET_PAGE_SIZE;
2364 } else {
2365 target_phys_addr_t start_addr2, end_addr2;
2366 int need_subpage = 0;
2368 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2369 end_addr2, need_subpage);
2371 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2372 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2373 &p->phys_offset, IO_MEM_UNASSIGNED,
2374 addr & TARGET_PAGE_MASK);
2375 subpage_register(subpage, start_addr2, end_addr2,
2376 phys_offset, region_offset);
2377 p->region_offset = 0;
2381 region_offset += TARGET_PAGE_SIZE;
2384 /* since each CPU stores ram addresses in its TLB cache, we must
2385 reset the modified entries */
2386 /* XXX: slow! */
2387 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2388 tlb_flush(env, 1);
2392 /* XXX: temporary until new memory mapping API */
2393 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2395 PhysPageDesc *p;
2397 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2398 if (!p)
2399 return IO_MEM_UNASSIGNED;
2400 return p->phys_offset;
2403 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2405 if (kvm_enabled())
2406 kvm_coalesce_mmio_region(addr, size);
2409 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2411 if (kvm_enabled())
2412 kvm_uncoalesce_mmio_region(addr, size);
2415 /* XXX: better than nothing */
2416 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2418 ram_addr_t addr;
2419 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2420 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2421 (uint64_t)size, (uint64_t)phys_ram_size);
2422 abort();
2424 addr = phys_ram_alloc_offset;
2425 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2426 return addr;
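/* Usage sketch (hypothetical board code, names made up): plain RAM is handed
   out by qemu_ram_alloc() and then registered page-aligned; an MMIO region
   would instead pass the value returned by cpu_register_io_memory().
   region_offset is normally 0. */
#if 0
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory_offset(0, ram_size,
                                        ram_offset | IO_MEM_RAM, 0);
#endif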
2429 void qemu_ram_free(ram_addr_t addr)
2433 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2435 #ifdef DEBUG_UNASSIGNED
2436 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2437 #endif
2438 #if defined(TARGET_SPARC)
2439 do_unassigned_access(addr, 0, 0, 0, 1);
2440 #endif
2441 return 0;
2444 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2446 #ifdef DEBUG_UNASSIGNED
2447 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2448 #endif
2449 #if defined(TARGET_SPARC)
2450 do_unassigned_access(addr, 0, 0, 0, 2);
2451 #endif
2452 return 0;
2455 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2457 #ifdef DEBUG_UNASSIGNED
2458 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2459 #endif
2460 #if defined(TARGET_SPARC)
2461 do_unassigned_access(addr, 0, 0, 0, 4);
2462 #endif
2463 return 0;
2466 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2468 #ifdef DEBUG_UNASSIGNED
2469 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2470 #endif
2471 #if defined(TARGET_SPARC)
2472 do_unassigned_access(addr, 1, 0, 0, 1);
2473 #endif
2476 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2478 #ifdef DEBUG_UNASSIGNED
2479 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2480 #endif
2481 #if defined(TARGET_SPARC)
2482 do_unassigned_access(addr, 1, 0, 0, 2);
2483 #endif
2486 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2488 #ifdef DEBUG_UNASSIGNED
2489 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2490 #endif
2491 #if defined(TARGET_SPARC)
2492 do_unassigned_access(addr, 1, 0, 0, 4);
2493 #endif
2496 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2497 unassigned_mem_readb,
2498 unassigned_mem_readw,
2499 unassigned_mem_readl,
2502 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2503 unassigned_mem_writeb,
2504 unassigned_mem_writew,
2505 unassigned_mem_writel,
2508 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2509 uint32_t val)
2511 int dirty_flags;
2512 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2513 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2514 #if !defined(CONFIG_USER_ONLY)
2515 tb_invalidate_phys_page_fast(ram_addr, 1);
2516 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2517 #endif
2519 stb_p(phys_ram_base + ram_addr, val);
2520 #ifdef USE_KQEMU
2521 if (cpu_single_env->kqemu_enabled &&
2522 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2523 kqemu_modify_page(cpu_single_env, ram_addr);
2524 #endif
2525 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2526 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2527 /* we remove the notdirty callback only if the code has been
2528 flushed */
2529 if (dirty_flags == 0xff)
2530 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2533 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2534 uint32_t val)
2536 int dirty_flags;
2537 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2538 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2539 #if !defined(CONFIG_USER_ONLY)
2540 tb_invalidate_phys_page_fast(ram_addr, 2);
2541 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2542 #endif
2544 stw_p(phys_ram_base + ram_addr, val);
2545 #ifdef USE_KQEMU
2546 if (cpu_single_env->kqemu_enabled &&
2547 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2548 kqemu_modify_page(cpu_single_env, ram_addr);
2549 #endif
2550 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2551 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2552 /* we remove the notdirty callback only if the code has been
2553 flushed */
2554 if (dirty_flags == 0xff)
2555 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2558 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2559 uint32_t val)
2561 int dirty_flags;
2562 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2563 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2564 #if !defined(CONFIG_USER_ONLY)
2565 tb_invalidate_phys_page_fast(ram_addr, 4);
2566 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2567 #endif
2569 stl_p(phys_ram_base + ram_addr, val);
2570 #ifdef USE_KQEMU
2571 if (cpu_single_env->kqemu_enabled &&
2572 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2573 kqemu_modify_page(cpu_single_env, ram_addr);
2574 #endif
2575 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2576 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2577 /* we remove the notdirty callback only if the code has been
2578 flushed */
2579 if (dirty_flags == 0xff)
2580 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2583 static CPUReadMemoryFunc *error_mem_read[3] = {
2584 NULL, /* never used */
2585 NULL, /* never used */
2586 NULL, /* never used */
2589 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2590 notdirty_mem_writeb,
2591 notdirty_mem_writew,
2592 notdirty_mem_writel,
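/* Dirty-tracking sketch: phys_ram_dirty keeps one byte of flags per RAM
   page.  The write handlers above set every flag except CODE_DIRTY_FLAG, so
   the byte only reaches 0xff once the translated code for the page has been
   invalidated, at which point the notdirty TLB entry is dropped. */
#if 0
/* hypothetical helper, equivalent to the 0xff checks used in this file */
static inline int example_page_fully_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif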
2595 /* Generate a debug exception if a watchpoint has been hit. */
2596 static void check_watchpoint(int offset, int len_mask, int flags)
2598 CPUState *env = cpu_single_env;
2599 target_ulong pc, cs_base;
2600 TranslationBlock *tb;
2601 target_ulong vaddr;
2602 CPUWatchpoint *wp;
2603 int cpu_flags;
2605 if (env->watchpoint_hit) {
2606 /* We re-entered the check after replacing the TB. Now raise
2607 * the debug interrupt so that it will trigger after the
2608 * current instruction. */
2609 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2610 return;
2612 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2613 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2614 if ((vaddr == (wp->vaddr & len_mask) ||
2615 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2616 wp->flags |= BP_WATCHPOINT_HIT;
2617 if (!env->watchpoint_hit) {
2618 env->watchpoint_hit = wp;
2619 tb = tb_find_pc(env->mem_io_pc);
2620 if (!tb) {
2621 cpu_abort(env, "check_watchpoint: could not find TB for "
2622 "pc=%p", (void *)env->mem_io_pc);
2624 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2625 tb_phys_invalidate(tb, -1);
2626 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2627 env->exception_index = EXCP_DEBUG;
2628 } else {
2629 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2630 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2632 cpu_resume_from_signal(env, NULL);
2634 } else {
2635 wp->flags &= ~BP_WATCHPOINT_HIT;
2640 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2641 so these check for a hit then pass through to the normal out-of-line
2642 phys routines. */
2643 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2645 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2646 return ldub_phys(addr);
2649 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2651 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2652 return lduw_phys(addr);
2655 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2657 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2658 return ldl_phys(addr);
2661 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2662 uint32_t val)
2664 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2665 stb_phys(addr, val);
2668 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2669 uint32_t val)
2671 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2672 stw_phys(addr, val);
2675 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2676 uint32_t val)
2678 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2679 stl_phys(addr, val);
2682 static CPUReadMemoryFunc *watch_mem_read[3] = {
2683 watch_mem_readb,
2684 watch_mem_readw,
2685 watch_mem_readl,
2688 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2689 watch_mem_writeb,
2690 watch_mem_writew,
2691 watch_mem_writel,
2694 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2695 unsigned int len)
2697 uint32_t ret;
2698 unsigned int idx;
2700 idx = SUBPAGE_IDX(addr);
2701 #if defined(DEBUG_SUBPAGE)
2702 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2703 mmio, len, addr, idx);
2704 #endif
2705 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2706 addr + mmio->region_offset[idx][0][len]);
2708 return ret;
2711 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2712 uint32_t value, unsigned int len)
2714 unsigned int idx;
2716 idx = SUBPAGE_IDX(addr);
2717 #if defined(DEBUG_SUBPAGE)
2718 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2719 mmio, len, addr, idx, value);
2720 #endif
2721 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2722 addr + mmio->region_offset[idx][1][len],
2723 value);
2726 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2728 #if defined(DEBUG_SUBPAGE)
2729 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2730 #endif
2732 return subpage_readlen(opaque, addr, 0);
2735 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2736 uint32_t value)
2738 #if defined(DEBUG_SUBPAGE)
2739 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2740 #endif
2741 subpage_writelen(opaque, addr, value, 0);
2744 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2746 #if defined(DEBUG_SUBPAGE)
2747 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2748 #endif
2750 return subpage_readlen(opaque, addr, 1);
2753 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2754 uint32_t value)
2756 #if defined(DEBUG_SUBPAGE)
2757 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2758 #endif
2759 subpage_writelen(opaque, addr, value, 1);
2762 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2764 #if defined(DEBUG_SUBPAGE)
2765 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2766 #endif
2768 return subpage_readlen(opaque, addr, 2);
2771 static void subpage_writel (void *opaque,
2772 target_phys_addr_t addr, uint32_t value)
2774 #if defined(DEBUG_SUBPAGE)
2775 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2776 #endif
2777 subpage_writelen(opaque, addr, value, 2);
2780 static CPUReadMemoryFunc *subpage_read[] = {
2781 &subpage_readb,
2782 &subpage_readw,
2783 &subpage_readl,
2786 static CPUWriteMemoryFunc *subpage_write[] = {
2787 &subpage_writeb,
2788 &subpage_writew,
2789 &subpage_writel,
2792 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2793 ram_addr_t memory, ram_addr_t region_offset)
2795 int idx, eidx;
2796 unsigned int i;
2798 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2799 return -1;
2800 idx = SUBPAGE_IDX(start);
2801 eidx = SUBPAGE_IDX(end);
2802 #if defined(DEBUG_SUBPAGE)
2803 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2804 mmio, start, end, idx, eidx, memory);
2805 #endif
2806 memory >>= IO_MEM_SHIFT;
2807 for (; idx <= eidx; idx++) {
2808 for (i = 0; i < 4; i++) {
2809 if (io_mem_read[memory][i]) {
2810 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2811 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2812 mmio->region_offset[idx][0][i] = region_offset;
2814 if (io_mem_write[memory][i]) {
2815 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2816 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2817 mmio->region_offset[idx][1][i] = region_offset;
2822 return 0;
2825 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2826 ram_addr_t orig_memory, ram_addr_t region_offset)
2828 subpage_t *mmio;
2829 int subpage_memory;
2831 mmio = qemu_mallocz(sizeof(subpage_t));
2833 mmio->base = base;
2834 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2835 #if defined(DEBUG_SUBPAGE)
2836 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2837 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2838 #endif
2839 *phys = subpage_memory | IO_MEM_SUBPAGE;
2840 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2841 region_offset);
2843 return mmio;
2846 static int get_free_io_mem_idx(void)
2848 int i;
2850 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2851 if (!io_mem_used[i]) {
2852 io_mem_used[i] = 1;
2853 return i;
2856 return -1;
2859 static void io_mem_init(void)
2861 int i;
2863 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2864 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2865 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2866 for (i=0; i<5; i++)
2867 io_mem_used[i] = 1;
2869 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2870 watch_mem_write, NULL);
2871 /* alloc dirty bits array */
2872 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2873 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2876 /* mem_read and mem_write are arrays of three functions used to access
2877 bytes (index 0), words (index 1) and dwords (index 2). Functions can
2878 be omitted with a NULL function pointer. The registered functions may
2879 be modified dynamically later.
2880 If io_index is non-zero, the corresponding io zone is
2881 modified. If it is zero, a new io zone is allocated. The return
2882 value can be used with cpu_register_physical_memory(). (-1) is
2883 returned on error. */
2884 int cpu_register_io_memory(int io_index,
2885 CPUReadMemoryFunc **mem_read,
2886 CPUWriteMemoryFunc **mem_write,
2887 void *opaque)
2889 int i, subwidth = 0;
2891 if (io_index <= 0) {
2892 io_index = get_free_io_mem_idx();
2893 if (io_index == -1)
2894 return io_index;
2895 } else {
2896 if (io_index >= IO_MEM_NB_ENTRIES)
2897 return -1;
2900 for(i = 0;i < 3; i++) {
2901 if (!mem_read[i] || !mem_write[i])
2902 subwidth = IO_MEM_SUBWIDTH;
2903 io_mem_read[io_index][i] = mem_read[i];
2904 io_mem_write[io_index][i] = mem_write[i];
2906 io_mem_opaque[io_index] = opaque;
2907 return (io_index << IO_MEM_SHIFT) | subwidth;
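/* Usage sketch (hypothetical device, all mydev_* names and MyDevState are
   made up): register byte/word/long callbacks and then map them at a guest
   physical address.  Passing 0 as io_index asks for a fresh slot; the
   returned value is what cpu_register_physical_memory_offset() expects in
   phys_offset. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(MyDevState *s, target_phys_addr_t base)
{
    int iomem = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory_offset(base, 0x1000, iomem, 0);
}
#endif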
2910 void cpu_unregister_io_memory(int io_table_address)
2912 int i;
2913 int io_index = io_table_address >> IO_MEM_SHIFT;
2915 for (i=0;i < 3; i++) {
2916 io_mem_read[io_index][i] = unassigned_mem_read[i];
2917 io_mem_write[io_index][i] = unassigned_mem_write[i];
2919 io_mem_opaque[io_index] = NULL;
2920 io_mem_used[io_index] = 0;
2923 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2925 return io_mem_write[io_index >> IO_MEM_SHIFT];
2928 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2930 return io_mem_read[io_index >> IO_MEM_SHIFT];
2933 #endif /* !defined(CONFIG_USER_ONLY) */
2935 /* physical memory access (slow version, mainly for debug) */
2936 #if defined(CONFIG_USER_ONLY)
2937 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2938 int len, int is_write)
2940 int l, flags;
2941 target_ulong page;
2942 void * p;
2944 while (len > 0) {
2945 page = addr & TARGET_PAGE_MASK;
2946 l = (page + TARGET_PAGE_SIZE) - addr;
2947 if (l > len)
2948 l = len;
2949 flags = page_get_flags(page);
2950 if (!(flags & PAGE_VALID))
2951 return;
2952 if (is_write) {
2953 if (!(flags & PAGE_WRITE))
2954 return;
2955 /* XXX: this code should not depend on lock_user */
2956 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2957 /* FIXME - should this return an error rather than just fail? */
2958 return;
2959 memcpy(p, buf, l);
2960 unlock_user(p, addr, l);
2961 } else {
2962 if (!(flags & PAGE_READ))
2963 return;
2964 /* XXX: this code should not depend on lock_user */
2965 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2966 /* FIXME - should this return an error rather than just fail? */
2967 return;
2968 memcpy(buf, p, l);
2969 unlock_user(p, addr, 0);
2971 len -= l;
2972 buf += l;
2973 addr += l;
2977 #else
2978 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2979 int len, int is_write)
2981 int l, io_index;
2982 uint8_t *ptr;
2983 uint32_t val;
2984 target_phys_addr_t page;
2985 unsigned long pd;
2986 PhysPageDesc *p;
2988 while (len > 0) {
2989 page = addr & TARGET_PAGE_MASK;
2990 l = (page + TARGET_PAGE_SIZE) - addr;
2991 if (l > len)
2992 l = len;
2993 p = phys_page_find(page >> TARGET_PAGE_BITS);
2994 if (!p) {
2995 pd = IO_MEM_UNASSIGNED;
2996 } else {
2997 pd = p->phys_offset;
3000 if (is_write) {
3001 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3002 target_phys_addr_t addr1 = addr;
3003 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3004 if (p)
3005 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3006 /* XXX: could force cpu_single_env to NULL to avoid
3007 potential bugs */
3008 if (l >= 4 && ((addr1 & 3) == 0)) {
3009 /* 32 bit write access */
3010 val = ldl_p(buf);
3011 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3012 l = 4;
3013 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3014 /* 16 bit write access */
3015 val = lduw_p(buf);
3016 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3017 l = 2;
3018 } else {
3019 /* 8 bit write access */
3020 val = ldub_p(buf);
3021 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3022 l = 1;
3024 } else {
3025 unsigned long addr1;
3026 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3027 /* RAM case */
3028 ptr = phys_ram_base + addr1;
3029 memcpy(ptr, buf, l);
3030 if (!cpu_physical_memory_is_dirty(addr1)) {
3031 /* invalidate code */
3032 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3033 /* set dirty bit */
3034 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3035 (0xff & ~CODE_DIRTY_FLAG);
3037 /* qemu doesn't execute guest code directly, but kvm does,
3038 therefore flush instruction caches */
3039 if (kvm_enabled())
3040 flush_icache_range((unsigned long)ptr,
3041 ((unsigned long)ptr)+l);
3043 } else {
3044 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3045 !(pd & IO_MEM_ROMD)) {
3046 target_phys_addr_t addr1 = addr;
3047 /* I/O case */
3048 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3049 if (p)
3050 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3051 if (l >= 4 && ((addr1 & 3) == 0)) {
3052 /* 32 bit read access */
3053 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3054 stl_p(buf, val);
3055 l = 4;
3056 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3057 /* 16 bit read access */
3058 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3059 stw_p(buf, val);
3060 l = 2;
3061 } else {
3062 /* 8 bit read access */
3063 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3064 stb_p(buf, val);
3065 l = 1;
3067 } else {
3068 /* RAM case */
3069 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3070 (addr & ~TARGET_PAGE_MASK);
3071 memcpy(buf, ptr, l);
3074 len -= l;
3075 buf += l;
3076 addr += l;
3080 /* used for ROM loading: can write in RAM and ROM */
3081 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3082 const uint8_t *buf, int len)
3084 int l;
3085 uint8_t *ptr;
3086 target_phys_addr_t page;
3087 unsigned long pd;
3088 PhysPageDesc *p;
3090 while (len > 0) {
3091 page = addr & TARGET_PAGE_MASK;
3092 l = (page + TARGET_PAGE_SIZE) - addr;
3093 if (l > len)
3094 l = len;
3095 p = phys_page_find(page >> TARGET_PAGE_BITS);
3096 if (!p) {
3097 pd = IO_MEM_UNASSIGNED;
3098 } else {
3099 pd = p->phys_offset;
3102 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3103 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3104 !(pd & IO_MEM_ROMD)) {
3105 /* do nothing */
3106 } else {
3107 unsigned long addr1;
3108 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3109 /* ROM/RAM case */
3110 ptr = phys_ram_base + addr1;
3111 memcpy(ptr, buf, l);
3113 len -= l;
3114 buf += l;
3115 addr += l;
3119 typedef struct {
3120 void *buffer;
3121 target_phys_addr_t addr;
3122 target_phys_addr_t len;
3123 } BounceBuffer;
3125 static BounceBuffer bounce;
3127 typedef struct MapClient {
3128 void *opaque;
3129 void (*callback)(void *opaque);
3130 LIST_ENTRY(MapClient) link;
3131 } MapClient;
3133 static LIST_HEAD(map_client_list, MapClient) map_client_list
3134 = LIST_HEAD_INITIALIZER(map_client_list);
3136 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3138 MapClient *client = qemu_malloc(sizeof(*client));
3140 client->opaque = opaque;
3141 client->callback = callback;
3142 LIST_INSERT_HEAD(&map_client_list, client, link);
3143 return client;
3146 void cpu_unregister_map_client(void *_client)
3148 MapClient *client = (MapClient *)_client;
3150 LIST_REMOVE(client, link);
3153 static void cpu_notify_map_clients(void)
3155 MapClient *client;
3157 while (!LIST_EMPTY(&map_client_list)) {
3158 client = LIST_FIRST(&map_client_list);
3159 client->callback(client->opaque);
3160 LIST_REMOVE(client, link);
3164 /* Map a physical memory region into a host virtual address.
3165 * May map a subset of the requested range, given by and returned in *plen.
3166 * May return NULL if resources needed to perform the mapping are exhausted.
3167 * Use only for reads OR writes - not for read-modify-write operations.
3168 * Use cpu_register_map_client() to know when retrying the map operation is
3169 * likely to succeed.
3171 void *cpu_physical_memory_map(target_phys_addr_t addr,
3172 target_phys_addr_t *plen,
3173 int is_write)
3175 target_phys_addr_t len = *plen;
3176 target_phys_addr_t done = 0;
3177 int l;
3178 uint8_t *ret = NULL;
3179 uint8_t *ptr;
3180 target_phys_addr_t page;
3181 unsigned long pd;
3182 PhysPageDesc *p;
3183 unsigned long addr1;
3185 while (len > 0) {
3186 page = addr & TARGET_PAGE_MASK;
3187 l = (page + TARGET_PAGE_SIZE) - addr;
3188 if (l > len)
3189 l = len;
3190 p = phys_page_find(page >> TARGET_PAGE_BITS);
3191 if (!p) {
3192 pd = IO_MEM_UNASSIGNED;
3193 } else {
3194 pd = p->phys_offset;
3197 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3198 if (done || bounce.buffer) {
3199 break;
3201 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3202 bounce.addr = addr;
3203 bounce.len = l;
3204 if (!is_write) {
3205 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3207 ptr = bounce.buffer;
3208 } else {
3209 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3210 ptr = phys_ram_base + addr1;
3212 if (!done) {
3213 ret = ptr;
3214 } else if (ret + done != ptr) {
3215 break;
3218 len -= l;
3219 addr += l;
3220 done += l;
3222 *plen = done;
3223 return ret;
3226 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3227 * Will also mark the memory as dirty if is_write == 1. access_len gives
3228 * the amount of memory that was actually read or written by the caller.
3230 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3231 int is_write, target_phys_addr_t access_len)
3233 if (buffer != bounce.buffer) {
3234 if (is_write) {
3235 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3236 while (access_len) {
3237 unsigned l;
3238 l = TARGET_PAGE_SIZE;
3239 if (l > access_len)
3240 l = access_len;
3241 if (!cpu_physical_memory_is_dirty(addr1)) {
3242 /* invalidate code */
3243 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3244 /* set dirty bit */
3245 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3246 (0xff & ~CODE_DIRTY_FLAG);
3248 addr1 += l;
3249 access_len -= l;
3252 return;
3254 if (is_write) {
3255 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3257 qemu_free(bounce.buffer);
3258 bounce.buffer = NULL;
3259 cpu_notify_map_clients();
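/* Usage sketch (hypothetical DMA-style caller; guest_pa, data and len are
   made-up names): map a guest physical range, copy into it, then unmap.  If
   the range is MMIO the map call hands back the single bounce buffer, so
   *plen may come back smaller than requested and a concurrent mapping can
   fail until the buffer is released. */
#if 0
    target_phys_addr_t maplen = len;
    void *host = cpu_physical_memory_map(guest_pa, &maplen, 1 /* is_write */);
    if (host) {
        memcpy(host, data, maplen);
        cpu_physical_memory_unmap(host, maplen, 1, maplen);
    } else {
        /* retry later, e.g. after cpu_register_map_client() fires */
    }
#endif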
3262 /* warning: addr must be aligned */
3263 uint32_t ldl_phys(target_phys_addr_t addr)
3265 int io_index;
3266 uint8_t *ptr;
3267 uint32_t val;
3268 unsigned long pd;
3269 PhysPageDesc *p;
3271 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3272 if (!p) {
3273 pd = IO_MEM_UNASSIGNED;
3274 } else {
3275 pd = p->phys_offset;
3278 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3279 !(pd & IO_MEM_ROMD)) {
3280 /* I/O case */
3281 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3282 if (p)
3283 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3284 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3285 } else {
3286 /* RAM case */
3287 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3288 (addr & ~TARGET_PAGE_MASK);
3289 val = ldl_p(ptr);
3291 return val;
3294 /* warning: addr must be aligned */
3295 uint64_t ldq_phys(target_phys_addr_t addr)
3297 int io_index;
3298 uint8_t *ptr;
3299 uint64_t val;
3300 unsigned long pd;
3301 PhysPageDesc *p;
3303 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3304 if (!p) {
3305 pd = IO_MEM_UNASSIGNED;
3306 } else {
3307 pd = p->phys_offset;
3310 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3311 !(pd & IO_MEM_ROMD)) {
3312 /* I/O case */
3313 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3314 if (p)
3315 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3316 #ifdef TARGET_WORDS_BIGENDIAN
3317 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3318 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3319 #else
3320 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3321 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3322 #endif
3323 } else {
3324 /* RAM case */
3325 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3326 (addr & ~TARGET_PAGE_MASK);
3327 val = ldq_p(ptr);
3329 return val;
3332 /* XXX: optimize */
3333 uint32_t ldub_phys(target_phys_addr_t addr)
3335 uint8_t val;
3336 cpu_physical_memory_read(addr, &val, 1);
3337 return val;
3340 /* XXX: optimize */
3341 uint32_t lduw_phys(target_phys_addr_t addr)
3343 uint16_t val;
3344 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3345 return tswap16(val);
3348 #ifdef __GNUC__
3349 #define likely(x) __builtin_expect(!!(x), 1)
3350 #define unlikely(x) __builtin_expect(!!(x), 0)
3351 #else
3352 #define likely(x) x
3353 #define unlikely(x) x
3354 #endif
3356 /* warning: addr must be aligned. The ram page is not marked as dirty
3357 and the code inside is not invalidated. It is useful if the dirty
3358 bits are used to track modified PTEs */
3359 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3361 int io_index;
3362 uint8_t *ptr;
3363 unsigned long pd;
3364 PhysPageDesc *p;
3366 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3367 if (!p) {
3368 pd = IO_MEM_UNASSIGNED;
3369 } else {
3370 pd = p->phys_offset;
3373 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3374 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3375 if (p)
3376 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3377 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3378 } else {
3379 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3380 ptr = phys_ram_base + addr1;
3381 stl_p(ptr, val);
3383 if (unlikely(in_migration)) {
3384 if (!cpu_physical_memory_is_dirty(addr1)) {
3385 /* invalidate code */
3386 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3387 /* set dirty bit */
3388 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3389 (0xff & ~CODE_DIRTY_FLAG);
3395 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3397 int io_index;
3398 uint8_t *ptr;
3399 unsigned long pd;
3400 PhysPageDesc *p;
3402 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3403 if (!p) {
3404 pd = IO_MEM_UNASSIGNED;
3405 } else {
3406 pd = p->phys_offset;
3409 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3410 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3411 if (p)
3412 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3413 #ifdef TARGET_WORDS_BIGENDIAN
3414 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3415 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3416 #else
3417 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3418 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3419 #endif
3420 } else {
3421 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3422 (addr & ~TARGET_PAGE_MASK);
3423 stq_p(ptr, val);
3427 /* warning: addr must be aligned */
3428 void stl_phys(target_phys_addr_t addr, uint32_t val)
3430 int io_index;
3431 uint8_t *ptr;
3432 unsigned long pd;
3433 PhysPageDesc *p;
3435 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3436 if (!p) {
3437 pd = IO_MEM_UNASSIGNED;
3438 } else {
3439 pd = p->phys_offset;
3442 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3443 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3444 if (p)
3445 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3446 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3447 } else {
3448 unsigned long addr1;
3449 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3450 /* RAM case */
3451 ptr = phys_ram_base + addr1;
3452 stl_p(ptr, val);
3453 if (!cpu_physical_memory_is_dirty(addr1)) {
3454 /* invalidate code */
3455 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3456 /* set dirty bit */
3457 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3458 (0xff & ~CODE_DIRTY_FLAG);
3463 /* XXX: optimize */
3464 void stb_phys(target_phys_addr_t addr, uint32_t val)
3466 uint8_t v = val;
3467 cpu_physical_memory_write(addr, &v, 1);
3470 /* XXX: optimize */
3471 void stw_phys(target_phys_addr_t addr, uint32_t val)
3473 uint16_t v = tswap16(val);
3474 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3477 /* XXX: optimize */
3478 void stq_phys(target_phys_addr_t addr, uint64_t val)
3480 val = tswap64(val);
3481 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3484 #endif
3486 /* virtual memory access for debug (includes writing to ROM) */
3487 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3488 uint8_t *buf, int len, int is_write)
3490 int l;
3491 target_phys_addr_t phys_addr;
3492 target_ulong page;
3494 while (len > 0) {
3495 page = addr & TARGET_PAGE_MASK;
3496 phys_addr = cpu_get_phys_page_debug(env, page);
3497 /* if no physical page mapped, return an error */
3498 if (phys_addr == -1)
3499 return -1;
3500 l = (page + TARGET_PAGE_SIZE) - addr;
3501 if (l > len)
3502 l = len;
3503 phys_addr += (addr & ~TARGET_PAGE_MASK);
3504 #if !defined(CONFIG_USER_ONLY)
3505 if (is_write)
3506 cpu_physical_memory_write_rom(phys_addr, buf, l);
3507 else
3508 #endif
3509 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3510 len -= l;
3511 buf += l;
3512 addr += l;
3514 return 0;
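/* Usage sketch (hypothetical debugger-style caller; env, pc and buf are
   made-up names): read guest memory through the MMU for inspection.  With
   is_write non-zero the system-mode path goes through
   cpu_physical_memory_write_rom(), so breakpoints can be patched into ROM. */
#if 0
    uint8_t buf[16];
    if (cpu_memory_rw_debug(env, pc, buf, sizeof(buf), 0) == 0) {
        /* buf now holds up to 16 bytes of guest memory at virtual address pc */
    }
#endif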
3517 /* in deterministic execution mode, instructions doing device I/Os
3518 must be at the end of the TB */
3519 void cpu_io_recompile(CPUState *env, void *retaddr)
3521 TranslationBlock *tb;
3522 uint32_t n, cflags;
3523 target_ulong pc, cs_base;
3524 uint64_t flags;
3526 tb = tb_find_pc((unsigned long)retaddr);
3527 if (!tb) {
3528 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3529 retaddr);
3531 n = env->icount_decr.u16.low + tb->icount;
3532 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3533 /* Calculate how many instructions had been executed before the fault
3534 occurred. */
3535 n = n - env->icount_decr.u16.low;
3536 /* Generate a new TB ending on the I/O insn. */
3537 n++;
3538 /* On MIPS and SH, delay slot instructions can only be restarted if
3539 they were already the first instruction in the TB. If this is not
3540 the first instruction in a TB then re-execute the preceding
3541 branch. */
3542 #if defined(TARGET_MIPS)
3543 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3544 env->active_tc.PC -= 4;
3545 env->icount_decr.u16.low++;
3546 env->hflags &= ~MIPS_HFLAG_BMASK;
3548 #elif defined(TARGET_SH4)
3549 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3550 && n > 1) {
3551 env->pc -= 2;
3552 env->icount_decr.u16.low++;
3553 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3555 #endif
3556 /* This should never happen. */
3557 if (n > CF_COUNT_MASK)
3558 cpu_abort(env, "TB too big during recompile");
3560 cflags = n | CF_LAST_IO;
3561 pc = tb->pc;
3562 cs_base = tb->cs_base;
3563 flags = tb->flags;
3564 tb_phys_invalidate(tb, -1);
3565 /* FIXME: In theory this could raise an exception. In practice
3566 we have already translated the block once so it's probably ok. */
3567 tb_gen_code(env, pc, cs_base, flags, cflags);
3568 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3569 the first in the TB) then we end up generating a whole new TB and
3570 repeating the fault, which is horribly inefficient.
3571 Better would be to execute just this insn uncached, or generate a
3572 second new TB. */
3573 cpu_resume_from_signal(env, NULL);
3576 void dump_exec_info(FILE *f,
3577 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3579 int i, target_code_size, max_target_code_size;
3580 int direct_jmp_count, direct_jmp2_count, cross_page;
3581 TranslationBlock *tb;
3583 target_code_size = 0;
3584 max_target_code_size = 0;
3585 cross_page = 0;
3586 direct_jmp_count = 0;
3587 direct_jmp2_count = 0;
3588 for(i = 0; i < nb_tbs; i++) {
3589 tb = &tbs[i];
3590 target_code_size += tb->size;
3591 if (tb->size > max_target_code_size)
3592 max_target_code_size = tb->size;
3593 if (tb->page_addr[1] != -1)
3594 cross_page++;
3595 if (tb->tb_next_offset[0] != 0xffff) {
3596 direct_jmp_count++;
3597 if (tb->tb_next_offset[1] != 0xffff) {
3598 direct_jmp2_count++;
3602 /* XXX: avoid using doubles? */
3603 cpu_fprintf(f, "Translation buffer state:\n");
3604 cpu_fprintf(f, "gen code size %ld/%ld\n",
3605 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3606 cpu_fprintf(f, "TB count %d/%d\n",
3607 nb_tbs, code_gen_max_blocks);
3608 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3609 nb_tbs ? target_code_size / nb_tbs : 0,
3610 max_target_code_size);
3611 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3612 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3613 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3614 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3615 cross_page,
3616 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3617 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3618 direct_jmp_count,
3619 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3620 direct_jmp2_count,
3621 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3622 cpu_fprintf(f, "\nStatistics:\n");
3623 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3624 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3625 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3626 tcg_dump_info(f, cpu_fprintf);
3629 #if !defined(CONFIG_USER_ONLY)
3631 #define MMUSUFFIX _cmmu
3632 #define GETPC() NULL
3633 #define env cpu_single_env
3634 #define SOFTMMU_CODE_ACCESS
3636 #define SHIFT 0
3637 #include "softmmu_template.h"
3639 #define SHIFT 1
3640 #include "softmmu_template.h"
3642 #define SHIFT 2
3643 #include "softmmu_template.h"
3645 #define SHIFT 3
3646 #include "softmmu_template.h"
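/* The four SHIFT values above instantiate softmmu_template.h for 1-, 2-, 4-
   and 8-byte code-fetch accesses (MMUSUFFIX _cmmu), with env temporarily
   aliased to cpu_single_env while the templates are expanded. */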
3648 #undef env
3650 #endif