/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
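
/* Worked example (illustrative, not part of the original file): with
   TARGET_PHYS_ADDR_SPACE_BITS = 36 and TARGET_PAGE_BITS = 12, 24 page-index
   bits remain.  P_L1_BITS_REM = 24 % 10 = 4, which is not "silly small", so
   P_L1_BITS = 4: a 16-entry L1 table, and P_L1_SHIFT = 36 - 12 - 4 = 20,
   i.e. two 1024-entry lower levels below it -- exactly enough levels to
   cover every page-index bit. */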
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
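
    /* Example (illustrative): on a host with 4 KiB pages, getpagesize()
       returns 4096, the loop above leaves qemu_host_page_bits = 12, and
       qemu_host_page_mask = ~(4096 - 1), i.e. the mask that rounds an
       address down to the start of its host page. */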
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
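
/* Illustration (not part of the original file): for a map with exactly one
   intermediate level, the walk above decomposes the page index from most-
   to least-significant chunk, equivalent to:

       lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
       lp = (void **)*lp + ((index >> L2_BITS) & (L2_SIZE - 1));
       pd = (PageDesc *)*lp + (index & (L2_SIZE - 1));

   page_find_alloc() performs the same walk generically for any depth,
   allocating missing intermediate tables on the way down when 'alloc'
   is set. */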
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
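
/* Sizing note (illustrative; the actual constants live in exec-all.h and
   tcg.h): assuming CODE_GEN_AVG_BLOCK_SIZE were 128 bytes, the default
   32 MiB buffer would yield roughly 256k TranslationBlock slots.
   code_gen_buffer_max_size keeps one maximum-sized TB's worth of slack
   (TCG_MAX_OP_SIZE * OPC_BUF_SIZE) so that a translation starting near
   the end of the buffer cannot overrun it. */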
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
    memory_map_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
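
/* Illustrative note on the '& 3' / '& ~3' arithmetic above: pointers stored
   in first_tb and walked through page_next are tagged in their low two bits
   with the page slot (0 or 1) that the TB occupies in the pointed-to page,
   which is safe because TranslationBlock is at least 4-byte aligned.
   Masking with ~3 recovers the real pointer, and the extracted tag n1
   selects which page_next[] link continues the list. */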
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
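
/* Worked example for set_bits (illustrative): start = 5, len = 7 gives
   end = 12, which crosses a byte boundary, so the first byte gets
   mask = 0xff << 5 = 0xe0 (bits 5..7), start advances to 8 = end1, the
   middle all-ones loop is skipped, and the final byte gets
   ~(0xff << 4) = 0x0f (bits 8..11).  Bits 5..11 end up set, i.e. exactly
   the range [start, start + len). */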
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
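
/* Illustrative note: because target instructions can straddle a page
   boundary, a TB records up to two physical pages (page_addr[0]/[1]).
   phys_page2 stays -1 in the common single-page case; tb_link_page() only
   registers the TB on a second page's list when the block really crosses
   over, so writes to either page can find and invalidate it. */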
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
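
/* Worked example (illustrative): for a 4-byte write at page offset 0x104
   with the code bitmap built, offset >> 3 = 0x20 selects the bitmap byte
   and (offset & 7) = 4 shifts it, so b & ((1 << 4) - 1) tests the four bits
   covering bytes 0x104..0x107.  Only when one of them is set -- i.e. a TB
   was actually translated from those bytes -- do we pay for the full range
   invalidation. */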
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
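
/* Illustrative note: the binary search works because tb_alloc() hands out
   tbs[] entries in order of strictly increasing tc_ptr -- code is generated
   linearly into code_gen_buffer and never freed individually (only flushed
   wholesale) -- so the array is sorted by tc_ptr, and the TB containing
   tc_ptr is the last one whose start is <= tc_ptr, i.e. tbs[m_max] when
   the loop falls through. */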
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};

/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}
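
/* Example of the coalescing above (illustrative): three consecutive guest
   physical pages at 0x1000, 0x2000 and 0x3000 backed by contiguous
   phys_offsets produce a single set_memory() callback for one
   0x3000-byte run, because each page merely extends map->size; a hole or a
   discontiguous phys_offset flushes the pending run to the client first
   and starts a new one. */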
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
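
/* Worked example (illustrative): after a 2 MiB page is entered,
   tlb_flush_addr/tlb_flush_mask cover [addr, addr + 2 MiB).  If a second
   large page arrives whose address differs in some bit above the common
   mask, the while loop shifts the mask left until that differing bit falls
   outside it, widening the remembered region.  tlb_flush_page() then only
   forces a full flush for addresses where (addr & mask) == tlb_flush_addr. */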
2238 /* Add a new TLB entry. At most one entry for a given virtual address
2239 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2240 supplied size is only used by tlb_flush_page. */
2241 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2242 target_phys_addr_t paddr
, int prot
,
2243 int mmu_idx
, target_ulong size
)
2248 target_ulong address
;
2249 target_ulong code_address
;
2250 unsigned long addend
;
2253 target_phys_addr_t iotlb
;
2255 assert(size
>= TARGET_PAGE_SIZE
);
2256 if (size
!= TARGET_PAGE_SIZE
) {
2257 tlb_add_large_page(env
, vaddr
, size
);
2259 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2261 pd
= IO_MEM_UNASSIGNED
;
2263 pd
= p
->phys_offset
;
2265 #if defined(DEBUG_TLB)
2266 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2267 " prot=%x idx=%d pd=0x%08lx\n",
2268 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2272 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2273 /* IO memory case (romd handled later) */
2274 address
|= TLB_MMIO
;
2276 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2277 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2279 iotlb
= pd
& TARGET_PAGE_MASK
;
2280 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2281 iotlb
|= IO_MEM_NOTDIRTY
;
2283 iotlb
|= IO_MEM_ROM
;
2285 /* IO handlers are currently passed a physical address.
2286 It would be nice to pass an offset from the base address
2287 of that region. This would avoid having to special case RAM,
2288 and avoid full address decoding in every device.
2289 We can't use the high bits of pd for this because
2290 IO_MEM_ROMD uses these as a ram address. */
2291 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2293 iotlb
+= p
->region_offset
;
2299 code_address
= address
;
2300 /* Make accesses to pages with watchpoints go via the
2301 watchpoint trap routines. */
2302 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2303 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2304 /* Avoid trapping reads of pages with a write breakpoint. */
2305 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2306 iotlb
= io_mem_watch
+ paddr
;
2307 address
|= TLB_MMIO
;
2313 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2314 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2315 te
= &env
->tlb_table
[mmu_idx
][index
];
2316 te
->addend
= addend
- vaddr
;
2317 if (prot
& PAGE_READ
) {
2318 te
->addr_read
= address
;
2323 if (prot
& PAGE_EXEC
) {
2324 te
->addr_code
= code_address
;
2328 if (prot
& PAGE_WRITE
) {
2329 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2330 (pd
& IO_MEM_ROMD
)) {
2331 /* Write access calls the I/O callback. */
2332 te
->addr_write
= address
| TLB_MMIO
;
2333 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2334 !cpu_physical_memory_is_dirty(pd
)) {
2335 te
->addr_write
= address
| TLB_NOTDIRTY
;
2337 te
->addr_write
= address
;
2340 te
->addr_write
= -1;
2346 void tlb_flush(CPUState
*env
, int flush_global
)
2350 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
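/* Illustrative sketch (not compiled): a typical caller validates a guest
   buffer before touching it, e.g. user-mode syscall emulation.  The helper
   name below is hypothetical. */
#if 0
static int guest_buffer_ok(target_ulong guest_addr, target_ulong size)
{
    /* 0 when every page in [guest_addr, guest_addr+size) is valid, readable
       and writable; pages made read-only to guard translated code are
       transparently unprotected by page_check_range() */
    return page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) == 0;
}
#endif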
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
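/* Worked example (assuming 4 KiB target pages): registering a region that
   covers only bytes 0x100..0x2ff of one page gives, for that page,
   start_addr2 = 0x100 and end_addr2 = 0x2ff, so need_subpage is set and the
   page is dispatched through per-offset subpage entries instead of being
   mapped whole. */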
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
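/* Worked example: with existing blocks [0, 0x100000) and
   [0x300000, 0x400000), a request for 0x100000 bytes sees two candidate
   gaps; the 0x200000-byte hole starting at 0x100000 is the smallest one
   that fits, so find_ram_offset() returns 0x100000 (best-fit placement). */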
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
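/* Illustrative sketch (not compiled): qemu_ram_alloc_from_ptr() lets a caller
   hand QEMU an existing host buffer (marked RAM_PREALLOC_MASK above) instead
   of having QEMU allocate one; qemu_ram_alloc() is the common case.  The
   helper name and block name are hypothetical. */
#if 0
static ram_addr_t example_shared_block(void *shared_host_mem, ram_addr_t len)
{
    /* the returned ram_addr_t is an offset in QEMU's ram address space,
       usable with cpu_register_physical_memory() */
    return qemu_ram_alloc_from_ptr(NULL, "example.shared", len,
                                   shared_host_mem);
}
#endif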
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            qemu_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            qemu_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
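/* Illustrative sketch (not compiled): a debugger front end arms a data
   watchpoint; the TLB machinery then routes accesses to that page through
   the watch_mem_* handlers above, which call check_watchpoint() before
   performing the real access.  cpu_watchpoint_insert() is assumed to be the
   public API of this era; the helper name is hypothetical. */
#if 0
static void example_arm_watchpoint(CPUState *env1, target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    /* watch 4 bytes for writes; subsequent stores to that page trap into
       check_watchpoint() via the TLB_MMIO path set up in tlb_set_page() */
    cpu_watchpoint_insert(env1, guest_addr, 4, BP_MEM_WRITE, &wp);
}
#endif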
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};
static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
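/* Illustrative sketch (not compiled): a device registers byte/word/long MMIO
   callbacks and maps them at a physical address.  The device type, register
   layout and addresses are made up; cpu_register_physical_memory() is the
   usual wrapper around cpu_register_physical_memory_log(). */
#if 0
typedef struct { uint32_t regs[1024]; } MyDevState;   /* hypothetical */

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->regs[addr >> 2];       /* addr is an offset into the region */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->regs[addr >> 2] = val;
}

static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL, NULL, mydev_readl,         /* NULL slots fall back to unassigned */
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void mydev_map(MyDevState *s)
{
    int io = cpu_register_io_memory(mydev_read, mydev_write, s,
                                    DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(0x10000000, 0x1000, io);
}
#endif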
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
static void memory_map_init(void)
{
    system_memory = qemu_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", UINT64_MAX);
    set_system_memory_map(system_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
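/* Illustrative sketch (not compiled): cpu_physical_memory_read()/write() are
   the convenience wrappers around cpu_physical_memory_rw() that most device
   and loader code uses; the descriptor layout here is made up. */
#if 0
static void example_copy_descriptor(target_phys_addr_t desc_pa)
{
    uint8_t desc[16];

    cpu_physical_memory_read(desc_pa, desc, sizeof(desc));   /* is_write=0 */
    /* ... interpret or modify the descriptor ... */
    cpu_physical_memory_write(desc_pa, desc, sizeof(desc));  /* is_write=1 */
}
#endif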
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = ULONG_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
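/* Illustrative sketch (not compiled): the zero-copy DMA pattern built on
   cpu_physical_memory_map()/unmap().  If the target region is not plain RAM,
   map() hands back the single bounce buffer, possibly for less than the
   requested length, so callers loop over partial mappings and may need to
   retry via cpu_register_map_client() when NULL is returned.  The helper
   name is hypothetical. */
#if 0
static void example_dma_fill(target_phys_addr_t pa, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(pa, &plen, 1 /* is_write */);

        if (!host) {
            /* resources exhausted: register a map client and retry later */
            break;
        }
        memset(host, 0, plen);                 /* "device" fills the buffer */
        cpu_physical_memory_unmap(host, plen, 1, plen);
        pa += plen;
        len -= plen;
    }
}
#endif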
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
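/* Illustrative sketch (not compiled): the _le/_be accessor variants above let
   device code state the byte order of in-guest-memory structures explicitly
   instead of relying on the target's native order, e.g. for a little-endian
   descriptor ring.  The layout is made up. */
#if 0
static void example_descriptor_peek(target_phys_addr_t desc_pa)
{
    uint32_t flags = ldl_le_phys(desc_pa);      /* 32-bit little-endian load */
    uint32_t count = lduw_le_phys(desc_pa + 4); /* 16-bit little-endian load */

    (void)flags;
    (void)count;
}
#endif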
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif