/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define WIN32_LEAN_AND_MEAN
#include <sys/types.h>
#include "qemu-common.h"
#if !defined(TARGET_IA64)
#if defined(CONFIG_USER_ONLY)
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_IA64)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#define code_gen_section                                \
    __attribute__((aligned (32)))

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;

static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
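
/* Make the given host memory range executable (used for the translated
   code buffers); a Win32 variant using VirtualProtect and a POSIX variant
   using mprotect() follow. */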
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
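
/* Determine the host page size, derive the qemu_host_page_* globals from
   it and allocate the first level of the physical page table. */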
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
        long long startaddr, endaddr;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
            n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                startaddr = MIN(startaddr,
                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                endaddr = MIN(endaddr,
                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                page_set_flags(startaddr & TARGET_PAGE_MASK,
                               TARGET_PAGE_ALIGN(endaddr),
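
/* Return the slot of the first-level page map that covers the given
   target page index. */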
static inline PageDesc **page_l1_map(target_ulong index)
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
    return &l1_map[index >> L2_BITS];
static inline PageDesc *page_find_alloc(target_ulong index)
    lp = page_l1_map(index);

    /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(target_ulong index)
    lp = page_l1_map(index);
    return p + (index & (L2_SIZE - 1));
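
/* Look up the PhysPageDesc for a physical page index, allocating the
   intermediate and second-level tables on demand when 'alloc' is set. */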
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));

    /* allocate if not found */
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));

    /* allocate if not found */
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
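
/* Allocate the buffer that will hold the translated code, either the
   static buffer or an mmap()ed region with host-specific placement
   constraints, and make it executable. */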
static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__) || defined(__DragonFly__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        code_gen_buffer = qemu_malloc(code_gen_buffer_size);
        map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1
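
/* Save and restore the target-independent part of the CPU state (halted
   flag and pending interrupt requests) for savevm/migration. */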
static void cpu_common_save(QEMUFile *f, void *opaque)
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
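
/* Register a new CPU: append it to the global CPU list, assign its index
   and thread id, and hook up the savevm handlers. */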
void cpu_exec_init(CPUState *env)
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    env->thread_id = GetCurrentProcessId();
    env->thread_id = getpid();
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);

static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
            for(j = 0; j < L2_SIZE; j++) {
                invalidate_page_bitmap(p);

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);

static void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
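
/* Helpers for the self-modifying-code bitmap: set_bits() marks a bit range
   and build_page_bitmap() records which bytes of a page are covered by
   translated code. */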
static inline void set_bits(uint8_t *tab, int start, int len)
        mask = 0xff << (start & 7);
        if ((start & ~7) == (end & ~7)) {
                mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));

static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
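
/* Translate one block starting at 'pc', emit the host code into the code
   generation buffer and link the new TB into the physical page tables. */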
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);

#endif /* TARGET_HAS_SMC */

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
    tb = &tbs[nb_tbs++];

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
*tb
);
1260 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1262 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1265 tb1
= tb
->jmp_next
[n
];
1267 /* find head of list */
1270 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1273 tb1
= tb1
->jmp_next
[n1
];
1275 /* we are now sure now that tb jumps to tb1 */
1278 /* remove tb from the jmp_first list */
1279 ptb
= &tb_next
->jmp_first
;
1283 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1284 if (n1
== n
&& tb1
== tb
)
1286 ptb
= &tb1
->jmp_next
[n1
];
1288 *ptb
= tb
->jmp_next
[n
];
1289 tb
->jmp_next
[n
] = NULL
;
1291 /* suppress the jump to next tb in generated code */
1292 tb_reset_jump(tb
, n
);
1294 /* suppress jumps in the tb on which we could have jumped */
1295 tb_reset_jump_recursive(tb_next
);
1299 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1301 tb_reset_jump_recursive2(tb
, 0);
1302 tb_reset_jump_recursive2(tb
, 1);

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    wp = qemu_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
            kvm_update_guest_debug(env, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
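
/* Unchain the TB currently executed by 'env' so that the CPU loop can
   notice a pending interrupt or exit request. */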
static void cpu_unlink_tb(CPUState *env)
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
    if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");

void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;

void cpu_exit(CPUState *env)
    env->exit_request = 1;

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },

static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    const CPULogItem *item;

    p1 = strchr(p, ',');
    if(cmp1(p,p1-p,"all")) {
        for(item = cpu_log_items; item->mask != 0; item++) {
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))

void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
        log_cpu_state(env, 0);

CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,

#if !defined(CONFIG_USER_ONLY)
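
/* Flush the tb_jmp_cache entries that could refer to TBs overlapping the
   given virtual page. */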
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;

void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);

    tlb_flush_jmp_cache(env, addr);

    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
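
/* Mark RAM TLB entries within [start, start + length) as not dirty so that
   the next write takes the slow path and sets the dirty bits again. */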
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
        len = length >> TARGET_PAGE_BITS;
        /* XXX: should not depend on cpu context */
        if (env->kqemu_enabled) {
            for(i = 0; i < len; i++) {
                kqemu_set_notdirty(env, addr);
                addr += TARGET_PAGE_SIZE;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);

int cpu_physical_memory_set_dirty_tracking(int enable)
    r = kvm_physical_memory_set_dirty_tracking(enable);
    in_migration = enable;

int cpu_physical_memory_get_dirty_tracking(void)
    return in_migration;

void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
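
/* Re-add the TLB_NOTDIRTY flag to a RAM TLB entry whose page is no longer
   marked dirty. */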
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
            iotlb |= IO_MEM_ROM;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
            iotlb += p->region_offset;

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;

void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)

/* dump memory mappings */
void page_dump(FILE *f)
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0;j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');

int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);

int page_check_range(target_ulong start, target_ulong len, int flags)
    if (start + len < start)
        /* we've wrapped around */

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !(p->flags & PAGE_VALID) )
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + qemu_host_page_size;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        /* if the page was really writable, then we change its
           protection back to writable */
        if (prot & PAGE_WRITE_ORG) {
            pindex = (address - host_start) >> TARGET_PAGE_BITS;
            if (!(p1[pindex].flags & PAGE_WRITE)) {
                mprotect((void *)g2h(host_start), qemu_host_page_size,
                         (prot & PAGE_BITS) | PAGE_WRITE);
                p1[pindex].flags |= PAGE_WRITE;
                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
                tb_invalidate_check(address);

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
            if (addr > start_addr)                                      \
                start_addr2 = start_addr & ~TARGET_PAGE_MASK;           \
                if (start_addr2 > 0)                                    \
            if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)    \
                end_addr2 = TARGET_PAGE_SIZE - 1;                       \
                end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
                if (end_addr2 < TARGET_PAGE_SIZE - 1)                   \
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
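
/* Illustrative sketch (not part of this file): how a machine or device model
   typically drives the registration function above.  It backs part of the
   guest-physical address space with RAM from qemu_ram_alloc() and maps one
   page of MMIO whose io index came from cpu_register_io_memory(); the
   addresses, sizes and io index are invented for the example, and callers
   normally go through the cpu_register_physical_memory() wrapper, which
   passes a region_offset of 0. */
#if 0
static void example_board_map_memory(int example_mmio_io_index)
{
    ram_addr_t ram_offset;

    /* 16MB of guest RAM at guest-physical address 0 */
    ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory_offset(0x00000000, 16 * 1024 * 1024,
                                        ram_offset | IO_MEM_RAM, 0);

    /* one page of MMIO; accesses are dispatched to the handlers registered
       for example_mmio_io_index */
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        example_mmio_io_index, 0);
}
#endif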
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
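
/* Illustrative sketch (not part of this file): the handlers above only run
   for guest pages that contain a watchpoint, because the TLB entry of such a
   page is redirected to io_mem_watch.  A debugger front end arms one roughly
   like this; cpu_watchpoint_insert() lives in the breakpoint/watchpoint code
   elsewhere in this tree, and the address and length here are invented. */
#if 0
static void example_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* any store to the 4 bytes at vaddr now faults into check_watchpoint()
       through the watch_mem_write handlers */
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif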
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
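
/* Illustrative sketch (not part of this file): the subpage machinery above is
   what a registration exercises when it does not cover whole target pages.
   The address, size and io index below are invented for the example. */
#if 0
static void example_map_small_region(int example_io_index)
{
    /* 0x100 bytes of MMIO in the middle of a page: the enclosing page gets a
       subpage container and per-offset dispatch instead of a flat mapping */
    cpu_register_physical_memory_offset(0x10000800, 0x100,
                                        example_io_index, 0);
}
#endif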
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
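
/* Illustrative sketch (not part of this file): registering a new I/O zone
   with the function above.  The device type and callbacks are invented for
   the example; leaving the byte/word slots NULL makes the returned index
   carry IO_MEM_SUBWIDTH, as described in the comment above. */
#if 0
typedef struct ExampleDevState {
    uint32_t reg;
} ExampleDevState;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->reg;                  /* addr is the offset within the region */
}

static void example_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDevState *s = opaque;
    s->reg = val;
}

static CPUReadMemoryFunc *example_read[3] = {
    NULL,                           /* no byte access */
    NULL,                           /* no word access */
    example_readl,
};

static CPUWriteMemoryFunc *example_write[3] = {
    NULL,
    NULL,
    example_writel,
};

static int example_register(ExampleDevState *s)
{
    /* io_index 0 asks for a newly allocated entry; the return value can be
       passed as a phys_offset to cpu_register_physical_memory_offset() */
    return cpu_register_io_memory(0, example_read, example_write, s);
}
#endif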
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
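
/* Illustrative sketch (not part of this file): a typical zero-copy access
   built on the map/unmap pair above.  The function and callback names are
   invented; the pattern (fall back to cpu_register_map_client() when the
   single bounce buffer is busy) follows the comments above. */
#if 0
static void example_dma_retry(void *opaque);

static void example_dma_write(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* mapping resources exhausted (e.g. bounce buffer in use): ask to
           be notified when a retry is likely to succeed */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    /* only 'plen' bytes were mapped; a real caller would loop over the rest */
    memcpy(host, data, plen);
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif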
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
#ifdef __GNUC__
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)
#else
#define likely(x)       x
#define unlikely(x)     x
#endif
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"