/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>
#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#define code_gen_section                                \
    __attribute__((aligned (32)))

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;

#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
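/* Editor's note: with this classic two-level scheme, a target page index is
   split as index = (l1 << L2_BITS) | l2, where l1 = index >> L2_BITS selects
   the l1_map[] slot and l2 = index & (L2_SIZE - 1) selects the PageDesc within
   the level-2 array, as page_l1_map() and page_find_alloc() below do. */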
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;

static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
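/* Editor's note: a subpage_t is installed when an I/O region does not cover a
   whole target page; SUBPAGE_IDX() gives the offset within the page that is
   used to index the per-offset handler tables above (see subpage_register()
   further down). */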
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
        long long startaddr, endaddr;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                    startaddr = MIN(startaddr,
                        (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                        (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
static inline PageDesc **page_l1_map(target_ulong index)
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
    return &l1_map[index >> L2_BITS];

static inline PageDesc *page_find_alloc(target_ulong index)
    lp = page_l1_map(index);

        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(target_ulong index)
    lp = page_l1_map(index);
    return p + (index & (L2_SIZE - 1));
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
        /* allocate if not found */
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
        /* allocate if not found */
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
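/* Editor's note: the physical page table gains an extra indirection level when
   TARGET_PHYS_ADDR_SPACE_BITS > 32 (the topmost bits select an intermediate
   table). phys_page_find() passes alloc = 0, so plain lookups never allocate;
   a missing descriptor is treated as IO_MEM_UNASSIGNED by the callers. */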
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];

static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        code_gen_buffer = qemu_malloc(code_gen_buffer_size);
        map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
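/* Editor's note: code_gen_buffer_max_size leaves head room at the end of the
   buffer for one maximally-sized block, so a TB started near the threshold can
   still be generated before the next flush; the tbs[] array is sized from
   CODE_GEN_AVG_BLOCK_SIZE (an estimate), and tb_alloc() below triggers a flush
   when either limit is reached. */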
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
void cpu_exec_init(CPUState *env)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
            for(j = 0; j < L2_SIZE; j++) {
                invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
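/* Editor's note: after a flush every previously generated TB points at freed
   code, so both the global tb_phys_hash and each CPU's tb_jmp_cache have to be
   cleared above; otherwise the execution loop could chain into stale host
   code. */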
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);

static void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
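/* Editor's note: the jump lists store tagged pointers: the two low bits of a
   TranslationBlock pointer carry the jump slot (0 or 1, with 2 marking the end
   of the circular list), which is why list walkers mask with & ~3 and
   producers or the slot number in (e.g. (long)tb | n in tb_alloc_page() and
   (long)tb | 2 for the list terminator). */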
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));

static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
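/* Editor's note: the bitmap allocated above holds one bit per byte of the
   target page (TARGET_PAGE_SIZE / 8 bytes). Once a page has crossed
   SMC_BITMAP_USE_THRESHOLD write-triggered lookups, the bitmap lets
   tb_invalidate_phys_page_fast() skip invalidation for stores that do not
   touch any translated code. */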
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
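/* Editor's note: code_gen_ptr is advanced past the freshly generated code and
   rounded up to CODE_GEN_ALIGN; the second physical page is looked up only
   when the translated guest code straddles a page boundary, otherwise
   phys_page2 stays -1 and tb_link_phys() links the TB to a single page. */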
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
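/* Editor's note: in the precise-SMC path above the store originates from the
   TB that is currently executing. The CPU state is first rolled back to the
   faulting instruction with cpu_restore_state(), then a replacement block
   containing only that instruction is generated (the cflags argument of 1
   encodes an instruction count of one) and execution resumes there, so the
   write can no longer invalidate the block it is running from. */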
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
#endif /* TARGET_HAS_SMC */
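/* Editor's note: two protection strategies coexist above. In user mode the
   host page backing the translated code is made read-only with mprotect(), so
   self-modifying writes fault into page_unprotect(); in system mode
   tlb_protect_code() clears the code-dirty flag instead, so guest stores go
   through the softmmu slow path and reach tb_invalidate_phys_page_fast(). */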
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
    tb = &tbs[nb_tbs++];

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
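/* Editor's note: the binary search above works because TBs are handed out from
   the tbs[] array in allocation order while code_gen_ptr only grows between
   flushes, so tc_ptr values increase monotonically with the array index. */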
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);

    wp = qemu_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not thread safe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
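/* Editor's note: clearing env->current_tb and unchaining the running TB with
   tb_reset_jump_recursive() breaks the direct block-to-block jumps, so the CPU
   falls back into the main execution loop where interrupt_request is checked;
   this is how an asynchronous interrupt is noticed while translated code is
   running. */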
void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    const CPULogItem *item;

        p1 = strchr(p, ',');
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
        log_cpu_state(env, 0);
CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;

void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);

    tlb_flush_jmp_cache(env, addr);

    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
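/* Editor's note: the software TLB is direct-mapped per MMU mode; the entry for
   a page lives at index (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), so
   flushing a single page only requires checking that one slot in each mode's
   table, as tlb_flush_page() does above. */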
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    len = length >> TARGET_PAGE_BITS;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
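/* Editor's note: resetting dirty bits works in two steps: the flags are
   cleared in phys_ram_dirty[], and every RAM write entry in the TLBs covering
   the range is retagged with TLB_NOTDIRTY (see tlb_reset_dirty_range() above),
   so the next guest store takes the slow path and can mark the page dirty
   again. */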
int cpu_physical_memory_set_dirty_tracking(int enable)
    in_migration = enable;

int cpu_physical_memory_get_dirty_tracking(void)
    return in_migration;

void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
    kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
            iotlb |= IO_MEM_ROM;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
            iotlb += p->region_offset;

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;
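/* Editor's note: each CPUTLBEntry keeps the page-aligned guest virtual address
   in addr_read/addr_write/addr_code with status flags (TLB_MMIO, TLB_NOTDIRTY,
   TLB_INVALID_MASK) packed into the low bits, while addend is the offset to
   add to a guest virtual address to obtain the host pointer; the iotlb entry
   holds the I/O handler index (plus region offset) used when a flag forces the
   slow path. */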
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
/* dump memory mappings */
void page_dump(FILE *f)
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0;j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');

int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
int page_check_range(target_ulong start, target_ulong len, int flags)
    if (start + len < start)
        /* we've wrapped around */

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !(p->flags & PAGE_VALID) )
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + qemu_host_page_size;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
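/* Editor's note: this is the user-mode half of the self-modifying-code
   handling. tb_alloc_page() removed PAGE_WRITE and mprotect()ed the host page
   read-only, so a guest store faults; the SEGV handler ends up here, the page
   is made writable again, and the TBs derived from it are invalidated before
   the faulting store is retried. */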
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
    if (addr > start_addr)                                              \
        start_addr2 = start_addr & ~TARGET_PAGE_MASK;                   \
        if (start_addr2 > 0)                                            \
    if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)            \
        end_addr2 = TARGET_PAGE_SIZE - 1;                               \
        end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK;   \
        if (end_addr2 < TARGET_PAGE_SIZE - 1)                           \

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
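
/* Illustrative sketch (not part of the original file, not compiled): how the
   region_offset argument is meant to be used.  The guest addresses and the
   "io_dev" index (a value returned by cpu_register_io_memory()) are
   hypothetical. */
#if 0
static void map_second_page_of_device(int io_dev)
{
    /* Map one page of the device at guest-physical 0x8001000, but tell the
       callbacks that this page starts at offset 0x1000 inside the device. */
    cpu_register_physical_memory_offset(0x8001000, TARGET_PAGE_SIZE,
                                        io_dev, 0x1000);
    /* A 4-byte read at 0x8001004 now reaches the device read callback with
       address 0x1004: the offset from the start of the region, plus
       region_offset, as described in the comment above. */
}
#endif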
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;

    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
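
/* Illustrative sketch (not part of the original file, not compiled): the
   usual board-level pattern for guest RAM with this allocator -- the helper
   name, base address and size are hypothetical. */
#if 0
static void map_main_ram(target_phys_addr_t base, ram_addr_t size)
{
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(size);       /* carve out of phys_ram_base */
    cpu_register_physical_memory(base, size, ram_offset | IO_MEM_RAM);
}
#endif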
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
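
/* Illustrative sketch (not part of the original file, not compiled): how the
   handlers above come into play.  It assumes the cpu_watchpoint_insert()
   helper defined earlier in this file; once a watchpoint covers a page, the
   TLB code routes accesses to that page through io_mem_watch, i.e. through
   watch_mem_* and check_watchpoint(). */
#if 0
static void watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif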
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
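
/* Illustrative sketch (not part of the original file, not compiled):
   registering a one-page MMIO region.  The device name and callbacks are
   hypothetical; only the 32-bit handlers are provided, so the returned index
   carries IO_MEM_SUBWIDTH and narrower accesses get split accordingly. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                           /* read device register at 'addr' */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* write device register at 'addr' */
}

static CPUReadMemoryFunc *mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void mydev_map(target_phys_addr_t base, void *opaque)
{
    int io;

    io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif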
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
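
/* Illustrative sketch (not part of the original file, not compiled): copying
   between two guest-physical ranges through the slow path above.  The helper
   name and buffer size are hypothetical. */
#if 0
static void copy_guest_phys(target_phys_addr_t dst, target_phys_addr_t src,
                            int len)
{
    uint8_t tmp[256];

    while (len > 0) {
        int l = len < (int)sizeof(tmp) ? len : (int)sizeof(tmp);
        cpu_physical_memory_rw(src, tmp, l, 0);    /* is_write = 0: read  */
        cpu_physical_memory_rw(dst, tmp, l, 1);    /* is_write = 1: write */
        src += l;
        dst += l;
        len -= l;
    }
}
#endif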
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
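
/* Illustrative sketch (not part of the original file, not compiled): the
   intended map/unmap pattern for a DMA-style transfer, with the single
   bounce buffer handled by giving up and (in real code) registering a
   MapClient callback.  Names are hypothetical. */
#if 0
static void dma_write_example(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    uint8_t *buf;

    buf = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);
    if (!buf) {
        /* resources exhausted (e.g. bounce.buffer busy); a real caller would
           call cpu_register_map_client() and retry from the callback */
        return;
    }
    memcpy(buf, data, plen);               /* plen may be smaller than len */
    cpu_physical_memory_unmap(buf, plen, 1, plen);
}
#endif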
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
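
/* Illustrative sketch (not part of the original file, not compiled): the
   typical caller of the notdirty variant is target code updating guest page
   table entries (e.g. setting an x86-style accessed bit), where the store
   must not disturb the dirty bitmap that is itself used to track modified
   PTEs.  The address and bit value are hypothetical. */
#if 0
static void pte_set_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20 /* accessed bit */);
}
#endif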
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif