2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
24 #include <sys/types.h>
37 #include "qemu-common.h"
39 #if !defined(TARGET_IA64)
47 #if defined(CONFIG_USER_ONLY)
51 //#define DEBUG_TB_INVALIDATE
54 //#define DEBUG_UNASSIGNED
56 /* make various TB consistency checks */
57 //#define DEBUG_TB_CHECK
58 //#define DEBUG_TLB_CHECK
60 //#define DEBUG_IOPORT
61 //#define DEBUG_SUBPAGE
63 #if !defined(CONFIG_USER_ONLY)
64 /* TB consistency checks only implemented for usermode emulation. */
68 #define SMC_BITMAP_USE_THRESHOLD 10
70 #if defined(TARGET_SPARC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 41
72 #elif defined(TARGET_SPARC)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 36
74 #elif defined(TARGET_ALPHA)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #define TARGET_VIRT_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_PPC64)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 #elif defined(TARGET_IA64)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 36
86 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
87 #define TARGET_PHYS_ADDR_SPACE_BITS 32
90 static TranslationBlock
*tbs
;
91 int code_gen_max_blocks
;
92 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
94 /* any access to the tbs or the page table must use this lock */
95 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
97 #if defined(__arm__) || defined(__sparc_v9__)
98 /* The prologue must be reachable with a direct jump. ARM and Sparc64
99 have limited branch ranges (possibly also PPC) so place it in a
100 section close to code segment. */
101 #define code_gen_section \
102 __attribute__((__section__(".gen_code"))) \
103 __attribute__((aligned (32)))
105 #define code_gen_section \
106 __attribute__((aligned (32)))
109 uint8_t code_gen_prologue
[1024] code_gen_section
;
110 static uint8_t *code_gen_buffer
;
111 static unsigned long code_gen_buffer_size
;
112 /* threshold to flush the translated code buffer */
113 static unsigned long code_gen_buffer_max_size
;
114 uint8_t *code_gen_ptr
;
116 #if !defined(CONFIG_USER_ONLY)
117 ram_addr_t phys_ram_size
;
119 uint8_t *phys_ram_base
;
120 uint8_t *phys_ram_dirty
;
122 static int in_migration
;
123 static ram_addr_t phys_ram_alloc_offset
= 0;
127 /* current CPU in the current thread. It is only valid inside
129 CPUState
*cpu_single_env
;
130 /* 0 = Do not count executed instructions.
131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
134 /* Current instruction counter. While executing translated code this may
135 include some instructions that have not yet been executed. */
138 typedef struct PageDesc
{
139 /* list of TBs intersecting this ram page */
140 TranslationBlock
*first_tb
;
141 /* in order to optimize self modifying code, we count the number
142 of lookups we do to a given page to use a bitmap */
143 unsigned int code_write_count
;
144 uint8_t *code_bitmap
;
145 #if defined(CONFIG_USER_ONLY)
150 typedef struct PhysPageDesc
{
151 /* offset in host memory of the page + io_index in the low bits */
152 ram_addr_t phys_offset
;
153 ram_addr_t region_offset
;
157 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
158 /* XXX: this is a temporary hack for alpha target.
159 * In the future, this is to be replaced by a multi-level table
160 * to actually be able to handle the complete 64 bits address space.
162 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
164 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
167 #define L1_SIZE (1 << L1_BITS)
168 #define L2_SIZE (1 << L2_BITS)
170 unsigned long qemu_real_host_page_size
;
171 unsigned long qemu_host_page_bits
;
172 unsigned long qemu_host_page_size
;
173 unsigned long qemu_host_page_mask
;
175 /* XXX: for system emulation, it could just be an array */
176 static PageDesc
*l1_map
[L1_SIZE
];
177 static PhysPageDesc
**l1_phys_map
;
179 #if !defined(CONFIG_USER_ONLY)
180 static void io_mem_init(void);
182 /* io memory support */
183 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
184 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
185 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
186 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
187 static int io_mem_watch
;
191 static const char *logfilename
= "/tmp/qemu.log";
194 static int log_append
= 0;
197 static int tlb_flush_count
;
198 static int tb_flush_count
;
199 static int tb_phys_invalidate_count
;
201 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
202 typedef struct subpage_t
{
203 target_phys_addr_t base
;
204 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
205 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
206 void *opaque
[TARGET_PAGE_SIZE
][2][4];
207 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
211 static void map_exec(void *addr
, long size
)
214 VirtualProtect(addr
, size
,
215 PAGE_EXECUTE_READWRITE
, &old_protect
);
219 static void map_exec(void *addr
, long size
)
221 unsigned long start
, end
, page_size
;
223 page_size
= getpagesize();
224 start
= (unsigned long)addr
;
225 start
&= ~(page_size
- 1);
227 end
= (unsigned long)addr
+ size
;
228 end
+= page_size
- 1;
229 end
&= ~(page_size
- 1);
231 mprotect((void *)start
, end
- start
,
232 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
236 static void page_init(void)
238 /* NOTE: we can always suppose that qemu_host_page_size >=
242 SYSTEM_INFO system_info
;
244 GetSystemInfo(&system_info
);
245 qemu_real_host_page_size
= system_info
.dwPageSize
;
248 qemu_real_host_page_size
= getpagesize();
250 if (qemu_host_page_size
== 0)
251 qemu_host_page_size
= qemu_real_host_page_size
;
252 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
253 qemu_host_page_size
= TARGET_PAGE_SIZE
;
254 qemu_host_page_bits
= 0;
255 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
256 qemu_host_page_bits
++;
257 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
258 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
259 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
261 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
263 long long startaddr
, endaddr
;
268 last_brk
= (unsigned long)sbrk(0);
269 f
= fopen("/proc/self/maps", "r");
272 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
274 startaddr
= MIN(startaddr
,
275 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
276 endaddr
= MIN(endaddr
,
277 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
278 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
279 TARGET_PAGE_ALIGN(endaddr
),
290 static inline PageDesc
**page_l1_map(target_ulong index
)
292 #if TARGET_LONG_BITS > 32
293 /* Host memory outside guest VM. For 32-bit targets we have already
294 excluded high addresses. */
295 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
298 return &l1_map
[index
>> L2_BITS
];
301 static inline PageDesc
*page_find_alloc(target_ulong index
)
304 lp
= page_l1_map(index
);
310 /* allocate if not found */
311 #if defined(CONFIG_USER_ONLY)
312 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
313 /* Don't use qemu_malloc because it may recurse. */
314 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
315 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
318 unsigned long addr
= h2g(p
);
319 page_set_flags(addr
& TARGET_PAGE_MASK
,
320 TARGET_PAGE_ALIGN(addr
+ len
),
324 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
328 return p
+ (index
& (L2_SIZE
- 1));
331 static inline PageDesc
*page_find(target_ulong index
)
334 lp
= page_l1_map(index
);
341 return p
+ (index
& (L2_SIZE
- 1));
344 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
349 p
= (void **)l1_phys_map
;
350 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
352 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
353 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
355 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
358 /* allocate if not found */
361 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
362 memset(p
, 0, sizeof(void *) * L1_SIZE
);
366 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
370 /* allocate if not found */
373 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
375 for (i
= 0; i
< L2_SIZE
; i
++) {
376 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
377 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
380 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
383 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
385 return phys_page_find_alloc(index
, 0);
388 #if !defined(CONFIG_USER_ONLY)
389 static void tlb_protect_code(ram_addr_t ram_addr
);
390 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
392 #define mmap_lock() do { } while(0)
393 #define mmap_unlock() do { } while(0)
396 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
398 #if defined(CONFIG_USER_ONLY)
399 /* Currently it is not recommanded to allocate big chunks of data in
400 user mode. It will change when a dedicated libc will be used */
401 #define USE_STATIC_CODE_GEN_BUFFER
404 #ifdef USE_STATIC_CODE_GEN_BUFFER
405 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
408 static void code_gen_alloc(unsigned long tb_size
)
413 #ifdef USE_STATIC_CODE_GEN_BUFFER
414 code_gen_buffer
= static_code_gen_buffer
;
415 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
416 map_exec(code_gen_buffer
, code_gen_buffer_size
);
418 code_gen_buffer_size
= tb_size
;
419 if (code_gen_buffer_size
== 0) {
420 #if defined(CONFIG_USER_ONLY)
421 /* in user mode, phys_ram_size is not meaningful */
422 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
424 /* XXX: needs ajustments */
425 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
428 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
429 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
430 /* The code gen buffer location may have constraints depending on
431 the host cpu and OS */
432 #if defined(__linux__)
437 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
438 #if defined(__x86_64__)
440 /* Cannot map more than that */
441 if (code_gen_buffer_size
> (800 * 1024 * 1024))
442 code_gen_buffer_size
= (800 * 1024 * 1024);
443 #elif defined(__sparc_v9__)
444 // Map the buffer below 2G, so we can use direct calls and branches
446 start
= (void *) 0x60000000UL
;
447 if (code_gen_buffer_size
> (512 * 1024 * 1024))
448 code_gen_buffer_size
= (512 * 1024 * 1024);
449 #elif defined(__arm__)
450 /* Map the buffer below 32M, so we can use direct calls and branches */
452 start
= (void *) 0x01000000UL
;
453 if (code_gen_buffer_size
> 16 * 1024 * 1024)
454 code_gen_buffer_size
= 16 * 1024 * 1024;
456 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
457 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
459 if (code_gen_buffer
== MAP_FAILED
) {
460 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
464 #elif defined(__FreeBSD__) || defined(__DragonFly__)
468 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
469 #if defined(__x86_64__)
470 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
471 * 0x40000000 is free */
473 addr
= (void *)0x40000000;
474 /* Cannot map more than that */
475 if (code_gen_buffer_size
> (800 * 1024 * 1024))
476 code_gen_buffer_size
= (800 * 1024 * 1024);
478 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
479 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
481 if (code_gen_buffer
== MAP_FAILED
) {
482 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
487 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
488 map_exec(code_gen_buffer
, code_gen_buffer_size
);
490 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
491 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
492 code_gen_buffer_max_size
= code_gen_buffer_size
-
493 code_gen_max_block_size();
494 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
495 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
498 /* Must be called before using the QEMU cpus. 'tb_size' is the size
499 (in bytes) allocated to the translation buffer. Zero means default
501 void cpu_exec_init_all(unsigned long tb_size
)
504 code_gen_alloc(tb_size
);
505 code_gen_ptr
= code_gen_buffer
;
507 #if !defined(CONFIG_USER_ONLY)
512 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
514 #define CPU_COMMON_SAVE_VERSION 1
516 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
518 CPUState
*env
= opaque
;
520 qemu_put_be32s(f
, &env
->halted
);
521 qemu_put_be32s(f
, &env
->interrupt_request
);
524 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
526 CPUState
*env
= opaque
;
528 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
531 qemu_get_be32s(f
, &env
->halted
);
532 qemu_get_be32s(f
, &env
->interrupt_request
);
533 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
534 version_id is increased. */
535 env
->interrupt_request
&= ~0x01;
542 void cpu_exec_init(CPUState
*env
)
547 #if defined(CONFIG_USER_ONLY)
550 env
->next_cpu
= NULL
;
553 while (*penv
!= NULL
) {
554 penv
= (CPUState
**)&(*penv
)->next_cpu
;
557 env
->cpu_index
= cpu_index
;
558 TAILQ_INIT(&env
->breakpoints
);
559 TAILQ_INIT(&env
->watchpoints
);
561 env
->thread_id
= GetCurrentProcessId();
563 env
->thread_id
= getpid();
566 #if defined(CONFIG_USER_ONLY)
569 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
570 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
571 cpu_common_save
, cpu_common_load
, env
);
572 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
573 cpu_save
, cpu_load
, env
);
577 static inline void invalidate_page_bitmap(PageDesc
*p
)
579 if (p
->code_bitmap
) {
580 qemu_free(p
->code_bitmap
);
581 p
->code_bitmap
= NULL
;
583 p
->code_write_count
= 0;
586 /* set to NULL all the 'first_tb' fields in all PageDescs */
587 static void page_flush_tb(void)
592 for(i
= 0; i
< L1_SIZE
; i
++) {
595 for(j
= 0; j
< L2_SIZE
; j
++) {
597 invalidate_page_bitmap(p
);
604 /* flush all the translation blocks */
605 /* XXX: tb_flush is currently not thread safe */
606 void tb_flush(CPUState
*env1
)
609 #if defined(DEBUG_FLUSH)
610 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
611 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
613 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
615 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
616 cpu_abort(env1
, "Internal error: code buffer overflow\n");
620 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
621 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
624 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
627 code_gen_ptr
= code_gen_buffer
;
628 /* XXX: flush processor icache at this point if cache flush is
633 #ifdef DEBUG_TB_CHECK
635 static void tb_invalidate_check(target_ulong address
)
637 TranslationBlock
*tb
;
639 address
&= TARGET_PAGE_MASK
;
640 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
641 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
642 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
643 address
>= tb
->pc
+ tb
->size
)) {
644 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
645 address
, (long)tb
->pc
, tb
->size
);
651 /* verify that all the pages have correct rights for code */
652 static void tb_page_check(void)
654 TranslationBlock
*tb
;
655 int i
, flags1
, flags2
;
657 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
658 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
659 flags1
= page_get_flags(tb
->pc
);
660 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
661 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
662 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
663 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
669 static void tb_jmp_check(TranslationBlock
*tb
)
671 TranslationBlock
*tb1
;
674 /* suppress any remaining jumps to this TB */
678 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
681 tb1
= tb1
->jmp_next
[n1
];
683 /* check end of list */
685 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
691 /* invalidate one TB */
692 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
695 TranslationBlock
*tb1
;
699 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
702 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
706 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
708 TranslationBlock
*tb1
;
714 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
716 *ptb
= tb1
->page_next
[n1
];
719 ptb
= &tb1
->page_next
[n1
];
723 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
725 TranslationBlock
*tb1
, **ptb
;
728 ptb
= &tb
->jmp_next
[n
];
731 /* find tb(n) in circular list */
735 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
736 if (n1
== n
&& tb1
== tb
)
739 ptb
= &tb1
->jmp_first
;
741 ptb
= &tb1
->jmp_next
[n1
];
744 /* now we can suppress tb(n) from the list */
745 *ptb
= tb
->jmp_next
[n
];
747 tb
->jmp_next
[n
] = NULL
;
751 /* reset the jump entry 'n' of a TB so that it is not chained to
753 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
755 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
758 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
763 target_phys_addr_t phys_pc
;
764 TranslationBlock
*tb1
, *tb2
;
766 /* remove the TB from the hash list */
767 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
768 h
= tb_phys_hash_func(phys_pc
);
769 tb_remove(&tb_phys_hash
[h
], tb
,
770 offsetof(TranslationBlock
, phys_hash_next
));
772 /* remove the TB from the page list */
773 if (tb
->page_addr
[0] != page_addr
) {
774 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
775 tb_page_remove(&p
->first_tb
, tb
);
776 invalidate_page_bitmap(p
);
778 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
779 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
780 tb_page_remove(&p
->first_tb
, tb
);
781 invalidate_page_bitmap(p
);
784 tb_invalidated_flag
= 1;
786 /* remove the TB from the hash list */
787 h
= tb_jmp_cache_hash_func(tb
->pc
);
788 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
789 if (env
->tb_jmp_cache
[h
] == tb
)
790 env
->tb_jmp_cache
[h
] = NULL
;
793 /* suppress this TB from the two jump lists */
794 tb_jmp_remove(tb
, 0);
795 tb_jmp_remove(tb
, 1);
797 /* suppress any remaining jumps to this TB */
803 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
804 tb2
= tb1
->jmp_next
[n1
];
805 tb_reset_jump(tb1
, n1
);
806 tb1
->jmp_next
[n1
] = NULL
;
809 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
811 tb_phys_invalidate_count
++;
814 static inline void set_bits(uint8_t *tab
, int start
, int len
)
820 mask
= 0xff << (start
& 7);
821 if ((start
& ~7) == (end
& ~7)) {
823 mask
&= ~(0xff << (end
& 7));
828 start
= (start
+ 8) & ~7;
830 while (start
< end1
) {
835 mask
= ~(0xff << (end
& 7));
841 static void build_page_bitmap(PageDesc
*p
)
843 int n
, tb_start
, tb_end
;
844 TranslationBlock
*tb
;
846 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
851 tb
= (TranslationBlock
*)((long)tb
& ~3);
852 /* NOTE: this is subtle as a TB may span two physical pages */
854 /* NOTE: tb_end may be after the end of the page, but
855 it is not a problem */
856 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
857 tb_end
= tb_start
+ tb
->size
;
858 if (tb_end
> TARGET_PAGE_SIZE
)
859 tb_end
= TARGET_PAGE_SIZE
;
862 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
864 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
865 tb
= tb
->page_next
[n
];
869 TranslationBlock
*tb_gen_code(CPUState
*env
,
870 target_ulong pc
, target_ulong cs_base
,
871 int flags
, int cflags
)
873 TranslationBlock
*tb
;
875 target_ulong phys_pc
, phys_page2
, virt_page2
;
878 phys_pc
= get_phys_addr_code(env
, pc
);
881 /* flush must be done */
883 /* cannot fail at this point */
885 /* Don't forget to invalidate previous TB info. */
886 tb_invalidated_flag
= 1;
888 tc_ptr
= code_gen_ptr
;
890 tb
->cs_base
= cs_base
;
893 cpu_gen_code(env
, tb
, &code_gen_size
);
894 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
896 /* check next page if needed */
897 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
899 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
900 phys_page2
= get_phys_addr_code(env
, virt_page2
);
902 tb_link_phys(tb
, phys_pc
, phys_page2
);
906 /* invalidate all TBs which intersect with the target physical page
907 starting in range [start;end[. NOTE: start and end must refer to
908 the same physical page. 'is_cpu_write_access' should be true if called
909 from a real cpu write access: the virtual CPU will exit the current
910 TB if code is modified inside this TB. */
911 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
912 int is_cpu_write_access
)
914 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
915 CPUState
*env
= cpu_single_env
;
916 target_ulong tb_start
, tb_end
;
919 #ifdef TARGET_HAS_PRECISE_SMC
920 int current_tb_not_found
= is_cpu_write_access
;
921 TranslationBlock
*current_tb
= NULL
;
922 int current_tb_modified
= 0;
923 target_ulong current_pc
= 0;
924 target_ulong current_cs_base
= 0;
925 int current_flags
= 0;
926 #endif /* TARGET_HAS_PRECISE_SMC */
928 p
= page_find(start
>> TARGET_PAGE_BITS
);
931 if (!p
->code_bitmap
&&
932 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
933 is_cpu_write_access
) {
934 /* build code bitmap */
935 build_page_bitmap(p
);
938 /* we remove all the TBs in the range [start, end[ */
939 /* XXX: see if in some cases it could be faster to invalidate all the code */
943 tb
= (TranslationBlock
*)((long)tb
& ~3);
944 tb_next
= tb
->page_next
[n
];
945 /* NOTE: this is subtle as a TB may span two physical pages */
947 /* NOTE: tb_end may be after the end of the page, but
948 it is not a problem */
949 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
950 tb_end
= tb_start
+ tb
->size
;
952 tb_start
= tb
->page_addr
[1];
953 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
955 if (!(tb_end
<= start
|| tb_start
>= end
)) {
956 #ifdef TARGET_HAS_PRECISE_SMC
957 if (current_tb_not_found
) {
958 current_tb_not_found
= 0;
960 if (env
->mem_io_pc
) {
961 /* now we have a real cpu fault */
962 current_tb
= tb_find_pc(env
->mem_io_pc
);
965 if (current_tb
== tb
&&
966 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
967 /* If we are modifying the current TB, we must stop
968 its execution. We could be more precise by checking
969 that the modification is after the current PC, but it
970 would require a specialized function to partially
971 restore the CPU state */
973 current_tb_modified
= 1;
974 cpu_restore_state(current_tb
, env
,
975 env
->mem_io_pc
, NULL
);
976 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
979 #endif /* TARGET_HAS_PRECISE_SMC */
980 /* we need to do that to handle the case where a signal
981 occurs while doing tb_phys_invalidate() */
984 saved_tb
= env
->current_tb
;
985 env
->current_tb
= NULL
;
987 tb_phys_invalidate(tb
, -1);
989 env
->current_tb
= saved_tb
;
990 if (env
->interrupt_request
&& env
->current_tb
)
991 cpu_interrupt(env
, env
->interrupt_request
);
996 #if !defined(CONFIG_USER_ONLY)
997 /* if no code remaining, no need to continue to use slow writes */
999 invalidate_page_bitmap(p
);
1000 if (is_cpu_write_access
) {
1001 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1005 #ifdef TARGET_HAS_PRECISE_SMC
1006 if (current_tb_modified
) {
1007 /* we generate a block containing just the instruction
1008 modifying the memory. It will ensure that it cannot modify
1010 env
->current_tb
= NULL
;
1011 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1012 cpu_resume_from_signal(env
, NULL
);
1017 /* len must be <= 8 and start must be a multiple of len */
1018 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1024 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1025 cpu_single_env
->mem_io_vaddr
, len
,
1026 cpu_single_env
->eip
,
1027 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1030 p
= page_find(start
>> TARGET_PAGE_BITS
);
1033 if (p
->code_bitmap
) {
1034 offset
= start
& ~TARGET_PAGE_MASK
;
1035 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1036 if (b
& ((1 << len
) - 1))
1040 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1044 #if !defined(CONFIG_SOFTMMU)
1045 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1046 unsigned long pc
, void *puc
)
1048 TranslationBlock
*tb
;
1051 #ifdef TARGET_HAS_PRECISE_SMC
1052 TranslationBlock
*current_tb
= NULL
;
1053 CPUState
*env
= cpu_single_env
;
1054 int current_tb_modified
= 0;
1055 target_ulong current_pc
= 0;
1056 target_ulong current_cs_base
= 0;
1057 int current_flags
= 0;
1060 addr
&= TARGET_PAGE_MASK
;
1061 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1065 #ifdef TARGET_HAS_PRECISE_SMC
1066 if (tb
&& pc
!= 0) {
1067 current_tb
= tb_find_pc(pc
);
1070 while (tb
!= NULL
) {
1072 tb
= (TranslationBlock
*)((long)tb
& ~3);
1073 #ifdef TARGET_HAS_PRECISE_SMC
1074 if (current_tb
== tb
&&
1075 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1076 /* If we are modifying the current TB, we must stop
1077 its execution. We could be more precise by checking
1078 that the modification is after the current PC, but it
1079 would require a specialized function to partially
1080 restore the CPU state */
1082 current_tb_modified
= 1;
1083 cpu_restore_state(current_tb
, env
, pc
, puc
);
1084 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1087 #endif /* TARGET_HAS_PRECISE_SMC */
1088 tb_phys_invalidate(tb
, addr
);
1089 tb
= tb
->page_next
[n
];
1092 #ifdef TARGET_HAS_PRECISE_SMC
1093 if (current_tb_modified
) {
1094 /* we generate a block containing just the instruction
1095 modifying the memory. It will ensure that it cannot modify
1097 env
->current_tb
= NULL
;
1098 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1099 cpu_resume_from_signal(env
, puc
);
1105 /* add the tb in the target page and protect it if necessary */
1106 static inline void tb_alloc_page(TranslationBlock
*tb
,
1107 unsigned int n
, target_ulong page_addr
)
1110 TranslationBlock
*last_first_tb
;
1112 tb
->page_addr
[n
] = page_addr
;
1113 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1114 tb
->page_next
[n
] = p
->first_tb
;
1115 last_first_tb
= p
->first_tb
;
1116 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1117 invalidate_page_bitmap(p
);
1119 #if defined(TARGET_HAS_SMC) || 1
1121 #if defined(CONFIG_USER_ONLY)
1122 if (p
->flags
& PAGE_WRITE
) {
1127 /* force the host page as non writable (writes will have a
1128 page fault + mprotect overhead) */
1129 page_addr
&= qemu_host_page_mask
;
1131 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1132 addr
+= TARGET_PAGE_SIZE
) {
1134 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1138 p2
->flags
&= ~PAGE_WRITE
;
1139 page_get_flags(addr
);
1141 mprotect(g2h(page_addr
), qemu_host_page_size
,
1142 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1143 #ifdef DEBUG_TB_INVALIDATE
1144 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1149 /* if some code is already present, then the pages are already
1150 protected. So we handle the case where only the first TB is
1151 allocated in a physical page */
1152 if (!last_first_tb
) {
1153 tlb_protect_code(page_addr
);
1157 #endif /* TARGET_HAS_SMC */
1160 /* Allocate a new translation block. Flush the translation buffer if
1161 too many translation blocks or too much generated code. */
1162 TranslationBlock
*tb_alloc(target_ulong pc
)
1164 TranslationBlock
*tb
;
1166 if (nb_tbs
>= code_gen_max_blocks
||
1167 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1169 tb
= &tbs
[nb_tbs
++];
1175 void tb_free(TranslationBlock
*tb
)
1177 /* In practice this is mostly used for single use temporary TB
1178 Ignore the hard cases and just back up if this TB happens to
1179 be the last one generated. */
1180 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1181 code_gen_ptr
= tb
->tc_ptr
;
1186 /* add a new TB and link it to the physical page tables. phys_page2 is
1187 (-1) to indicate that only one page contains the TB. */
1188 void tb_link_phys(TranslationBlock
*tb
,
1189 target_ulong phys_pc
, target_ulong phys_page2
)
1192 TranslationBlock
**ptb
;
1194 /* Grab the mmap lock to stop another thread invalidating this TB
1195 before we are done. */
1197 /* add in the physical hash table */
1198 h
= tb_phys_hash_func(phys_pc
);
1199 ptb
= &tb_phys_hash
[h
];
1200 tb
->phys_hash_next
= *ptb
;
1203 /* add in the page list */
1204 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1205 if (phys_page2
!= -1)
1206 tb_alloc_page(tb
, 1, phys_page2
);
1208 tb
->page_addr
[1] = -1;
1210 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1211 tb
->jmp_next
[0] = NULL
;
1212 tb
->jmp_next
[1] = NULL
;
1214 /* init original jump addresses */
1215 if (tb
->tb_next_offset
[0] != 0xffff)
1216 tb_reset_jump(tb
, 0);
1217 if (tb
->tb_next_offset
[1] != 0xffff)
1218 tb_reset_jump(tb
, 1);
1220 #ifdef DEBUG_TB_CHECK
1226 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1227 tb[1].tc_ptr. Return NULL if not found */
1228 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1230 int m_min
, m_max
, m
;
1232 TranslationBlock
*tb
;
1236 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1237 tc_ptr
>= (unsigned long)code_gen_ptr
)
1239 /* binary search (cf Knuth) */
1242 while (m_min
<= m_max
) {
1243 m
= (m_min
+ m_max
) >> 1;
1245 v
= (unsigned long)tb
->tc_ptr
;
1248 else if (tc_ptr
< v
) {
1257 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1259 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1261 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1264 tb1
= tb
->jmp_next
[n
];
1266 /* find head of list */
1269 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1272 tb1
= tb1
->jmp_next
[n1
];
1274 /* we are now sure now that tb jumps to tb1 */
1277 /* remove tb from the jmp_first list */
1278 ptb
= &tb_next
->jmp_first
;
1282 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1283 if (n1
== n
&& tb1
== tb
)
1285 ptb
= &tb1
->jmp_next
[n1
];
1287 *ptb
= tb
->jmp_next
[n
];
1288 tb
->jmp_next
[n
] = NULL
;
1290 /* suppress the jump to next tb in generated code */
1291 tb_reset_jump(tb
, n
);
1293 /* suppress jumps in the tb on which we could have jumped */
1294 tb_reset_jump_recursive(tb_next
);
1298 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1300 tb_reset_jump_recursive2(tb
, 0);
1301 tb_reset_jump_recursive2(tb
, 1);
1304 #if defined(TARGET_HAS_ICE)
1305 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1307 target_phys_addr_t addr
;
1309 ram_addr_t ram_addr
;
1312 addr
= cpu_get_phys_page_debug(env
, pc
);
1313 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1315 pd
= IO_MEM_UNASSIGNED
;
1317 pd
= p
->phys_offset
;
1319 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1320 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1324 /* Add a watchpoint. */
1325 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1326 int flags
, CPUWatchpoint
**watchpoint
)
1328 target_ulong len_mask
= ~(len
- 1);
1331 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1332 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1333 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1334 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1337 wp
= qemu_malloc(sizeof(*wp
));
1340 wp
->len_mask
= len_mask
;
1343 /* keep all GDB-injected watchpoints in front */
1345 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1347 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1349 tlb_flush_page(env
, addr
);
1356 /* Remove a specific watchpoint. */
1357 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1360 target_ulong len_mask
= ~(len
- 1);
1363 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1364 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1365 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1366 cpu_watchpoint_remove_by_ref(env
, wp
);
1373 /* Remove a specific watchpoint by reference. */
1374 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1376 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1378 tlb_flush_page(env
, watchpoint
->vaddr
);
1380 qemu_free(watchpoint
);
1383 /* Remove all matching watchpoints. */
1384 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1386 CPUWatchpoint
*wp
, *next
;
1388 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1389 if (wp
->flags
& mask
)
1390 cpu_watchpoint_remove_by_ref(env
, wp
);
1394 /* Add a breakpoint. */
1395 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1396 CPUBreakpoint
**breakpoint
)
1398 #if defined(TARGET_HAS_ICE)
1401 bp
= qemu_malloc(sizeof(*bp
));
1406 /* keep all GDB-injected breakpoints in front */
1408 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1410 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1412 breakpoint_invalidate(env
, pc
);
1422 /* Remove a specific breakpoint. */
1423 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1425 #if defined(TARGET_HAS_ICE)
1428 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1429 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1430 cpu_breakpoint_remove_by_ref(env
, bp
);
1440 /* Remove a specific breakpoint by reference. */
1441 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1443 #if defined(TARGET_HAS_ICE)
1444 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1446 breakpoint_invalidate(env
, breakpoint
->pc
);
1448 qemu_free(breakpoint
);
1452 /* Remove all matching breakpoints. */
1453 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1455 #if defined(TARGET_HAS_ICE)
1456 CPUBreakpoint
*bp
, *next
;
1458 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1459 if (bp
->flags
& mask
)
1460 cpu_breakpoint_remove_by_ref(env
, bp
);
1465 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1466 CPU loop after each instruction */
1467 void cpu_single_step(CPUState
*env
, int enabled
)
1469 #if defined(TARGET_HAS_ICE)
1470 if (env
->singlestep_enabled
!= enabled
) {
1471 env
->singlestep_enabled
= enabled
;
1473 kvm_update_guest_debug(env
, 0);
1475 /* must flush all the translated code to avoid inconsistancies */
1476 /* XXX: only flush what is necessary */
1483 /* enable or disable low levels log */
1484 void cpu_set_log(int log_flags
)
1486 loglevel
= log_flags
;
1487 if (loglevel
&& !logfile
) {
1488 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1490 perror(logfilename
);
1493 #if !defined(CONFIG_SOFTMMU)
1494 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1496 static char logfile_buf
[4096];
1497 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1500 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1504 if (!loglevel
&& logfile
) {
1510 void cpu_set_log_filename(const char *filename
)
1512 logfilename
= strdup(filename
);
1517 cpu_set_log(loglevel
);
1520 static void cpu_unlink_tb(CPUState
*env
)
1522 #if defined(USE_NPTL)
1523 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1524 problem and hope the cpu will stop of its own accord. For userspace
1525 emulation this often isn't actually as bad as it sounds. Often
1526 signals are used primarily to interrupt blocking syscalls. */
1528 TranslationBlock
*tb
;
1529 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1531 tb
= env
->current_tb
;
1532 /* if the cpu is currently executing code, we must unlink it and
1533 all the potentially executing TB */
1534 if (tb
&& !testandset(&interrupt_lock
)) {
1535 env
->current_tb
= NULL
;
1536 tb_reset_jump_recursive(tb
);
1537 resetlock(&interrupt_lock
);
1542 /* mask must never be zero, except for A20 change call */
1543 void cpu_interrupt(CPUState
*env
, int mask
)
1547 old_mask
= env
->interrupt_request
;
1548 env
->interrupt_request
|= mask
;
1549 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1550 kvm_update_interrupt_request(env
);
1553 env
->icount_decr
.u16
.high
= 0xffff;
1554 #ifndef CONFIG_USER_ONLY
1556 && (mask
& ~old_mask
) != 0) {
1557 cpu_abort(env
, "Raised interrupt while not in I/O function");
1565 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1567 env
->interrupt_request
&= ~mask
;
1570 void cpu_exit(CPUState
*env
)
1572 env
->exit_request
= 1;
1576 const CPULogItem cpu_log_items
[] = {
1577 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1578 "show generated host assembly code for each compiled TB" },
1579 { CPU_LOG_TB_IN_ASM
, "in_asm",
1580 "show target assembly code for each compiled TB" },
1581 { CPU_LOG_TB_OP
, "op",
1582 "show micro ops for each compiled TB" },
1583 { CPU_LOG_TB_OP_OPT
, "op_opt",
1586 "before eflags optimization and "
1588 "after liveness analysis" },
1589 { CPU_LOG_INT
, "int",
1590 "show interrupts/exceptions in short format" },
1591 { CPU_LOG_EXEC
, "exec",
1592 "show trace before each executed TB (lots of logs)" },
1593 { CPU_LOG_TB_CPU
, "cpu",
1594 "show CPU state before block translation" },
1596 { CPU_LOG_PCALL
, "pcall",
1597 "show protected mode far calls/returns/exceptions" },
1598 { CPU_LOG_RESET
, "cpu_reset",
1599 "show CPU state before CPU resets" },
1602 { CPU_LOG_IOPORT
, "ioport",
1603 "show all i/o ports accesses" },
1608 static int cmp1(const char *s1
, int n
, const char *s2
)
1610 if (strlen(s2
) != n
)
1612 return memcmp(s1
, s2
, n
) == 0;
1615 /* takes a comma separated list of log masks. Return 0 if error. */
1616 int cpu_str_to_log_mask(const char *str
)
1618 const CPULogItem
*item
;
1625 p1
= strchr(p
, ',');
1628 if(cmp1(p
,p1
-p
,"all")) {
1629 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1633 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1634 if (cmp1(p
, p1
- p
, item
->name
))
1648 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1655 fprintf(stderr
, "qemu: fatal: ");
1656 vfprintf(stderr
, fmt
, ap
);
1657 fprintf(stderr
, "\n");
1659 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1661 cpu_dump_state(env
, stderr
, fprintf
, 0);
1663 if (qemu_log_enabled()) {
1664 qemu_log("qemu: fatal: ");
1665 qemu_log_vprintf(fmt
, ap2
);
1668 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1670 log_cpu_state(env
, 0);
1680 CPUState
*cpu_copy(CPUState
*env
)
1682 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1683 CPUState
*next_cpu
= new_env
->next_cpu
;
1684 int cpu_index
= new_env
->cpu_index
;
1685 #if defined(TARGET_HAS_ICE)
1690 memcpy(new_env
, env
, sizeof(CPUState
));
1692 /* Preserve chaining and index. */
1693 new_env
->next_cpu
= next_cpu
;
1694 new_env
->cpu_index
= cpu_index
;
1696 /* Clone all break/watchpoints.
1697 Note: Once we support ptrace with hw-debug register access, make sure
1698 BP_CPU break/watchpoints are handled correctly on clone. */
1699 TAILQ_INIT(&env
->breakpoints
);
1700 TAILQ_INIT(&env
->watchpoints
);
1701 #if defined(TARGET_HAS_ICE)
1702 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1703 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1705 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1706 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1714 #if !defined(CONFIG_USER_ONLY)
1716 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1720 /* Discard jump cache entries for any tb which might potentially
1721 overlap the flushed page. */
1722 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1723 memset (&env
->tb_jmp_cache
[i
], 0,
1724 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1726 i
= tb_jmp_cache_hash_page(addr
);
1727 memset (&env
->tb_jmp_cache
[i
], 0,
1728 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1731 /* NOTE: if flush_global is true, also flush global entries (not
1733 void tlb_flush(CPUState
*env
, int flush_global
)
1737 #if defined(DEBUG_TLB)
1738 printf("tlb_flush:\n");
1740 /* must reset current TB so that interrupts cannot modify the
1741 links while we are modifying them */
1742 env
->current_tb
= NULL
;
1744 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1745 env
->tlb_table
[0][i
].addr_read
= -1;
1746 env
->tlb_table
[0][i
].addr_write
= -1;
1747 env
->tlb_table
[0][i
].addr_code
= -1;
1748 env
->tlb_table
[1][i
].addr_read
= -1;
1749 env
->tlb_table
[1][i
].addr_write
= -1;
1750 env
->tlb_table
[1][i
].addr_code
= -1;
1751 #if (NB_MMU_MODES >= 3)
1752 env
->tlb_table
[2][i
].addr_read
= -1;
1753 env
->tlb_table
[2][i
].addr_write
= -1;
1754 env
->tlb_table
[2][i
].addr_code
= -1;
1755 #if (NB_MMU_MODES == 4)
1756 env
->tlb_table
[3][i
].addr_read
= -1;
1757 env
->tlb_table
[3][i
].addr_write
= -1;
1758 env
->tlb_table
[3][i
].addr_code
= -1;
1763 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1766 if (env
->kqemu_enabled
) {
1767 kqemu_flush(env
, flush_global
);
1773 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1775 if (addr
== (tlb_entry
->addr_read
&
1776 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1777 addr
== (tlb_entry
->addr_write
&
1778 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1779 addr
== (tlb_entry
->addr_code
&
1780 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1781 tlb_entry
->addr_read
= -1;
1782 tlb_entry
->addr_write
= -1;
1783 tlb_entry
->addr_code
= -1;
1787 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1791 #if defined(DEBUG_TLB)
1792 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1794 /* must reset current TB so that interrupts cannot modify the
1795 links while we are modifying them */
1796 env
->current_tb
= NULL
;
1798 addr
&= TARGET_PAGE_MASK
;
1799 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1800 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1801 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1802 #if (NB_MMU_MODES >= 3)
1803 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1804 #if (NB_MMU_MODES == 4)
1805 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1809 tlb_flush_jmp_cache(env
, addr
);
1812 if (env
->kqemu_enabled
) {
1813 kqemu_flush_page(env
, addr
);
1818 /* update the TLBs so that writes to code in the virtual page 'addr'
1820 static void tlb_protect_code(ram_addr_t ram_addr
)
1822 cpu_physical_memory_reset_dirty(ram_addr
,
1823 ram_addr
+ TARGET_PAGE_SIZE
,
1827 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1828 tested for self modifying code */
1829 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1832 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1835 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1836 unsigned long start
, unsigned long length
)
1839 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1840 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1841 if ((addr
- start
) < length
) {
1842 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1847 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1851 unsigned long length
, start1
;
1855 start
&= TARGET_PAGE_MASK
;
1856 end
= TARGET_PAGE_ALIGN(end
);
1858 length
= end
- start
;
1861 len
= length
>> TARGET_PAGE_BITS
;
1863 /* XXX: should not depend on cpu context */
1865 if (env
->kqemu_enabled
) {
1868 for(i
= 0; i
< len
; i
++) {
1869 kqemu_set_notdirty(env
, addr
);
1870 addr
+= TARGET_PAGE_SIZE
;
1874 mask
= ~dirty_flags
;
1875 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1876 for(i
= 0; i
< len
; i
++)
1879 /* we modify the TLB cache so that the dirty bit will be set again
1880 when accessing the range */
1881 start1
= start
+ (unsigned long)phys_ram_base
;
1882 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1883 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1884 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1885 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1886 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1887 #if (NB_MMU_MODES >= 3)
1888 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1889 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1890 #if (NB_MMU_MODES == 4)
1891 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1892 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1898 int cpu_physical_memory_set_dirty_tracking(int enable
)
1903 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1904 in_migration
= enable
;
1908 int cpu_physical_memory_get_dirty_tracking(void)
1910 return in_migration
;
1913 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
, target_phys_addr_t end_addr
)
1916 kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1919 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1921 ram_addr_t ram_addr
;
1923 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1924 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1925 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1926 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1927 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1932 /* update the TLB according to the current state of the dirty bits */
1933 void cpu_tlb_update_dirty(CPUState
*env
)
1936 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1937 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1938 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1939 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1940 #if (NB_MMU_MODES >= 3)
1941 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1942 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1943 #if (NB_MMU_MODES == 4)
1944 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1945 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1950 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1952 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1953 tlb_entry
->addr_write
= vaddr
;
1956 /* update the TLB corresponding to virtual page vaddr
1957 so that it is no longer dirty */
1958 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1962 vaddr
&= TARGET_PAGE_MASK
;
1963 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1964 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1965 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1966 #if (NB_MMU_MODES >= 3)
1967 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1968 #if (NB_MMU_MODES == 4)
1969 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1974 /* add a new TLB entry. At most one entry for a given virtual address
1975 is permitted. Return 0 if OK or 2 if the page could not be mapped
1976 (can only happen in non SOFTMMU mode for I/O pages or pages
1977 conflicting with the host address space). */
1978 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1979 target_phys_addr_t paddr
, int prot
,
1980 int mmu_idx
, int is_softmmu
)
1985 target_ulong address
;
1986 target_ulong code_address
;
1987 target_phys_addr_t addend
;
1991 target_phys_addr_t iotlb
;
1993 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1995 pd
= IO_MEM_UNASSIGNED
;
1997 pd
= p
->phys_offset
;
1999 #if defined(DEBUG_TLB)
2000 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2001 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
2006 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2007 /* IO memory case (romd handled later) */
2008 address
|= TLB_MMIO
;
2010 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
2011 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2013 iotlb
= pd
& TARGET_PAGE_MASK
;
2014 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2015 iotlb
|= IO_MEM_NOTDIRTY
;
2017 iotlb
|= IO_MEM_ROM
;
2019 /* IO handlers are currently passed a phsical address.
2020 It would be nice to pass an offset from the base address
2021 of that region. This would avoid having to special case RAM,
2022 and avoid full address decoding in every device.
2023 We can't use the high bits of pd for this because
2024 IO_MEM_ROMD uses these as a ram address. */
2025 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2027 iotlb
+= p
->region_offset
;
2033 code_address
= address
;
2034 /* Make accesses to pages with watchpoints go via the
2035 watchpoint trap routines. */
2036 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2037 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2038 iotlb
= io_mem_watch
+ paddr
;
2039 /* TODO: The memory case can be optimized by not trapping
2040 reads of pages with a write breakpoint. */
2041 address
|= TLB_MMIO
;
2045 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2046 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2047 te
= &env
->tlb_table
[mmu_idx
][index
];
2048 te
->addend
= addend
- vaddr
;
2049 if (prot
& PAGE_READ
) {
2050 te
->addr_read
= address
;
2055 if (prot
& PAGE_EXEC
) {
2056 te
->addr_code
= code_address
;
2060 if (prot
& PAGE_WRITE
) {
2061 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2062 (pd
& IO_MEM_ROMD
)) {
2063 /* Write access calls the I/O callback. */
2064 te
->addr_write
= address
| TLB_MMIO
;
2065 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2066 !cpu_physical_memory_is_dirty(pd
)) {
2067 te
->addr_write
= address
| TLB_NOTDIRTY
;
2069 te
->addr_write
= address
;
2072 te
->addr_write
= -1;
2079 void tlb_flush(CPUState
*env
, int flush_global
)
2083 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2087 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2088 target_phys_addr_t paddr
, int prot
,
2089 int mmu_idx
, int is_softmmu
)
2094 /* dump memory mappings */
2095 void page_dump(FILE *f
)
2097 unsigned long start
, end
;
2098 int i
, j
, prot
, prot1
;
2101 fprintf(f
, "%-8s %-8s %-8s %s\n",
2102 "start", "end", "size", "prot");
2106 for(i
= 0; i
<= L1_SIZE
; i
++) {
2111 for(j
= 0;j
< L2_SIZE
; j
++) {
2116 if (prot1
!= prot
) {
2117 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2119 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2120 start
, end
, end
- start
,
2121 prot
& PAGE_READ
? 'r' : '-',
2122 prot
& PAGE_WRITE
? 'w' : '-',
2123 prot
& PAGE_EXEC
? 'x' : '-');
2137 int page_get_flags(target_ulong address
)
2141 p
= page_find(address
>> TARGET_PAGE_BITS
);
2147 /* modify the flags of a page and invalidate the code if
2148 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2149 depending on PAGE_WRITE */
2150 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2155 /* mmap_lock should already be held. */
2156 start
= start
& TARGET_PAGE_MASK
;
2157 end
= TARGET_PAGE_ALIGN(end
);
2158 if (flags
& PAGE_WRITE
)
2159 flags
|= PAGE_WRITE_ORG
;
2160 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2161 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2162 /* We may be called for host regions that are outside guest
2166 /* if the write protection is set, then we invalidate the code
2168 if (!(p
->flags
& PAGE_WRITE
) &&
2169 (flags
& PAGE_WRITE
) &&
2171 tb_invalidate_phys_page(addr
, 0, NULL
);
2177 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2183 if (start
+ len
< start
)
2184 /* we've wrapped around */
2187 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2188 start
= start
& TARGET_PAGE_MASK
;
2190 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2191 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2194 if( !(p
->flags
& PAGE_VALID
) )
2197 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2199 if (flags
& PAGE_WRITE
) {
2200 if (!(p
->flags
& PAGE_WRITE_ORG
))
2202 /* unprotect the page if it was put read-only because it
2203 contains translated code */
2204 if (!(p
->flags
& PAGE_WRITE
)) {
2205 if (!page_unprotect(addr
, 0, NULL
))
2214 /* called from signal handler: invalidate the code and unprotect the
2215 page. Return TRUE if the fault was succesfully handled. */
2216 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2218 unsigned int page_index
, prot
, pindex
;
2220 target_ulong host_start
, host_end
, addr
;
2222 /* Technically this isn't safe inside a signal handler. However we
2223 know this only ever happens in a synchronous SEGV handler, so in
2224 practice it seems to be ok. */
2227 host_start
= address
& qemu_host_page_mask
;
2228 page_index
= host_start
>> TARGET_PAGE_BITS
;
2229 p1
= page_find(page_index
);
2234 host_end
= host_start
+ qemu_host_page_size
;
2237 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2241 /* if the page was really writable, then we change its
2242 protection back to writable */
2243 if (prot
& PAGE_WRITE_ORG
) {
2244 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2245 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2246 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2247 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2248 p1
[pindex
].flags
|= PAGE_WRITE
;
2249 /* and since the content will be modified, we must invalidate
2250 the corresponding translated code. */
2251 tb_invalidate_phys_page(address
, pc
, puc
);
2252 #ifdef DEBUG_TB_CHECK
2253 tb_invalidate_check(address
);
2263 static inline void tlb_set_dirty(CPUState
*env
,
2264 unsigned long addr
, target_ulong vaddr
)
2267 #endif /* defined(CONFIG_USER_ONLY) */
2269 #if !defined(CONFIG_USER_ONLY)
2271 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2272 ram_addr_t memory
, ram_addr_t region_offset
);
2273 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2274 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2275 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2278 if (addr > start_addr) \
2281 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2282 if (start_addr2 > 0) \
2286 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2287 end_addr2 = TARGET_PAGE_SIZE - 1; \
2289 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2290 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2295 /* register physical memory. 'size' must be a multiple of the target
2296 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2297 io memory page. The address used when calling the IO function is
2298 the offset from the start of the region, plus region_offset. Both
2299 start_region and regon_offset are rounded down to a page boundary
2300 before calculating this offset. This should not be a problem unless
2301 the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
    target_phys_addr_t addr, end_addr;
    ram_addr_t orig_size = size;

    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
        kvm_set_phys_mem(start_addr, size, phys_offset);
    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
        region_offset += TARGET_PAGE_SIZE;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
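/* Illustrative sketch (not part of the original file, kept under #if 0 so it
   does not affect the build): how a caller might map an MMIO handler set,
   previously registered with cpu_register_io_memory(), into the guest
   physical address space.  example_io_handle and the addresses are made up;
   cpu_register_physical_memory() is assumed to be the usual wrapper that
   forwards region_offset == 0. */
#if 0
static void example_map_mmio(int example_io_handle)
{
    /* One page of MMIO at 0x10000000.  With region_offset equal to the
       region's base, the handlers receive the in-page offset plus
       0x10000000, i.e. the full guest physical address. */
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        example_io_handle, 0x10000000);

    /* With the plain wrapper (region_offset == 0) the handlers would
       instead see offsets relative to the start of the page. */
    cpu_register_physical_memory(0x10000000 + TARGET_PAGE_SIZE,
                                 TARGET_PAGE_SIZE, example_io_handle);
}
#endif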
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_coalesce_mmio_region(addr, size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_uncoalesce_mmio_region(addr, size);
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);

void qemu_ram_free(ram_addr_t addr)
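/* Illustrative sketch (not part of the original file, kept under #if 0 so it
   does not affect the build): the usual pairing of qemu_ram_alloc() with a
   RAM registration.  The size and guest address are made up;
   cpu_register_physical_memory() is assumed to be the region_offset == 0
   wrapper declared in the headers. */
#if 0
static void example_alloc_guest_ram(void)
{
    /* carve 1MB out of the preallocated phys_ram_base pool ... */
    ram_addr_t offset = qemu_ram_alloc(0x100000);

    /* ... and expose it to the guest at physical address 0x08000000;
       a phys_offset with clear low bits is treated as plain RAM. */
    cpu_register_physical_memory(0x08000000, 0x100000, offset | IO_MEM_RAM);
}
#endif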
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stb_p(phys_ram_base + ram_addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stw_p(phys_ram_base + ram_addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stl_p(phys_ram_base + ram_addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                cpu_resume_from_signal(env, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory access routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);

static CPUReadMemoryFunc *watch_mem_read[3] = {

static CPUWriteMemoryFunc *watch_mem_write[3] = {
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
    return subpage_readlen(opaque, addr, 0);

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
    subpage_writelen(opaque, addr, value, 0);

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
    return subpage_readlen(opaque, addr, 1);

static void subpage_writew (void *opaque, target_phys_addr_t addr,
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
    subpage_writelen(opaque, addr, value, 1);

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
    return subpage_readlen(opaque, addr, 2);

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
    subpage_writelen(opaque, addr, value, 2);

static CPUReadMemoryFunc *subpage_read[] = {

static CPUWriteMemoryFunc *subpage_write[] = {
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
    mmio = qemu_mallocz(sizeof(subpage_t));

    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
static int get_free_io_mem_idx(void)
    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {

static void io_mem_init(void)
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index >= IO_MEM_NB_ENTRIES)

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
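/* Illustrative usage sketch (not part of the original file, kept under #if 0
   so it does not affect the build): registering a trivial MMIO handler set.
   The device state type and handler names are hypothetical; only
   cpu_register_io_memory() itself comes from this file. */
#if 0
typedef struct ExampleDevState { uint32_t reg0; } ExampleDevState;

static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    /* addr is the offset inside the registered page, plus region_offset */
    return (addr == 0) ? s->reg0 : 0;
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    ExampleDevState *s = opaque;
    if (addr == 0)
        s->reg0 = val;
}

/* byte/word slots left NULL: the region then gets IO_MEM_SUBWIDTH set */
static CPUReadMemoryFunc *example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,
};
static CPUWriteMemoryFunc *example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static int example_register_mmio(ExampleDevState *s)
{
    /* io_index 0 means "allocate a new slot"; the return value is the
       phys_offset to hand to cpu_register_physical_memory(). */
    return cpu_register_io_memory(0, example_mmio_read, example_mmio_write, s);
}
#endif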
void cpu_unregister_io_memory(int io_table_address)
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
    return io_mem_write[io_index >> IO_MEM_SHIFT];

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
    return io_mem_read[io_index >> IO_MEM_SHIFT];

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
            unlock_user(p, addr, 0);
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    /* 8 bit write access */
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                /* qemu doesn't execute guest code directly, but kvm does,
                   therefore flush the instruction caches */
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr) + l);
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
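/* Illustrative sketch (not part of the original file, kept under #if 0 so it
   does not affect the build): copying data to and from guest physical memory
   with the slow-path accessor above.  cpu_physical_memory_read()/write() are
   the usual wrappers around cpu_physical_memory_rw(); the address is made up. */
#if 0
static void example_phys_copy(target_phys_addr_t guest_pa)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];

    /* is_write != 0: copy from 'out' into guest RAM (or an MMIO handler) */
    cpu_physical_memory_write(guest_pa, out, sizeof(out));

    /* is_write == 0: copy from guest memory back into 'in' */
    cpu_physical_memory_read(guest_pa, in, sizeof(in));
}
#endif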
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
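/* Illustrative sketch (not part of the original file, kept under #if 0 so it
   does not affect the build): how firmware loading code might use the helper
   above, which writes into ROM pages that cpu_physical_memory_rw() would
   treat as I/O and discard.  The image pointer, size and load address are
   hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *image, int image_size)
{
    /* copy the blob into the (possibly ROM) region at 0xfffc0000 */
    cpu_physical_memory_write_rom(0xfffc0000, image, image_size);
}
#endif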
    target_phys_addr_t addr;
    target_phys_addr_t len;

static BounceBuffer bounce;

typedef struct MapClient {
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);

void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)
    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    uint8_t *ret = NULL;
    target_phys_addr_t page;
    unsigned long addr1;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            ptr = bounce.buffer;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        } else if (ret + done != ptr) {
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
    if (buffer != bounce.buffer) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
        flush_icache_range((unsigned long)buffer,
                           (unsigned long)buffer + access_len);
    cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
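/* Illustrative sketch (not part of the original file, kept under #if 0 so it
   does not affect the build): the zero-copy pattern the two functions above
   are meant for.  A device model maps a guest buffer, lets the host work on
   it directly, then unmaps it; if the mapping fails (e.g. the single bounce
   buffer is busy) it could retry later via cpu_register_map_client().  The
   function and parameter names are hypothetical. */
#if 0
static int example_dma_to_guest(target_phys_addr_t guest_pa,
                                const uint8_t *data,
                                target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(guest_pa, &plen, 1 /* is_write */);

    if (!host)
        return -1;              /* resources exhausted, try again later */

    /* the mapping may be shorter than requested; only touch that much */
    memcpy(host, data, plen);

    /* access_len tells unmap how much to mark dirty / copy back */
    cpu_physical_memory_unmap(host, plen, 1, plen);
    return plen == size ? 0 : -1;
}
#endif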
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);

uint32_t ldub_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, &val, 1);

uint32_t lduw_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define unlikely(x) x
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);

void stb_phys(target_phys_addr_t addr, uint32_t val)
    cpu_physical_memory_write(addr, &v, 1);

void stw_phys(target_phys_addr_t addr, uint32_t val)
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);

void stq_phys(target_phys_addr_t addr, uint64_t val)
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
    target_phys_addr_t phys_addr;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
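/* Illustrative sketch (not part of the original file, kept under #if 0 so it
   does not affect the build): how a debugger front end, such as the gdb stub,
   might read guest *virtual* memory through the helper above, which walks the
   guest page table page by page via cpu_get_phys_page_debug(). */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* is_write == 0: read; an error is returned if a page is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif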
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
    TranslationBlock *tb;
    target_ulong pc, cs_base;

    tb = tb_find_pc((unsigned long)retaddr);
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    cs_base = tb->cs_base;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
        if (tb->tb_next_offset[0] != 0xffff) {
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"

#include "softmmu_template.h"