/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
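
/* Added note (not original code): an illustrative sketch of how the macros
 * above split a target virtual address into radix-tree indices.  The page
 * index consumes TARGET_PAGE_BITS, the top V_L1_BITS of what remains select
 * the l1_map[] slot, and each further level consumes L2_BITS.  The helper
 * name below is hypothetical and kept out of the build with #if 0. */
#if 0
static inline size_t example_v_l1_index(target_ulong vaddr)
{
    /* index into l1_map[] for the page containing vaddr */
    return ((vaddr >> TARGET_PAGE_BITS) >> V_L1_SHIFT) & (V_L1_SIZE - 1);
}
#endif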
/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif
#ifdef _WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
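
/* Added note (not original code): the page and jump lists walked above store
 * the slot number in the two low bits of each TranslationBlock pointer, which
 * is why list walkers mask with ~3 and list builders OR the slot in.  A small
 * illustrative helper (hypothetical name, kept out of the build with #if 0): */
#if 0
static inline TranslationBlock *example_untag_tb(TranslationBlock *tagged,
                                                 int *n)
{
    *n = (long)tagged & 3;                           /* slot 0, 1 or 2 (2 = list head) */
    return (TranslationBlock *)((long)tagged & ~3);  /* real TB pointer */
}
#endif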
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
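
/* Added note (not original code): a minimal usage sketch of set_bits().
 * Marking 15 bytes starting at page offset 5 sets bits 5..19 of the bitmap,
 * i.e. parts of the first three bitmap bytes.  Kept out of the build. */
#if 0
static void example_set_bits_usage(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8] = { 0 };

    set_bits(bitmap, 5, 15);   /* start=5, len=15 -> bits 5..19 become 1 */
}
#endif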
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
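
/* Added note (not original code): the virt_page2 check above detects a TB
 * that spills into a second guest page.  For instance, with 4 KiB pages a
 * 32-byte block starting at pc=0x1ff0 ends at 0x200f, so the page of its last
 * byte (0x2000) differs from pc & TARGET_PAGE_MASK (0x1000) and both pages
 * get linked.  Illustrative helper, kept out of the build: */
#if 0
static target_ulong example_tb_second_page(target_ulong pc, int size)
{
    /* page of the last byte of the TB; differs from the first page
       only when the block crosses a TARGET_PAGE_SIZE boundary */
    return (pc + size - 1) & TARGET_PAGE_MASK;
}
#endif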
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
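
/* Added note (not original code): how the code_bitmap test above works.  A
 * write of 'len' bytes at page offset 'offset' overlaps translated code iff
 * any of bits offset..offset+len-1 are set; the shift and mask extract
 * exactly those bits (len <= 8 and start aligned to len, as the caller
 * guarantees).  Illustrative helper, kept out of the build: */
#if 0
static inline int example_bitmap_hit(const uint8_t *code_bitmap,
                                     int offset, int len)
{
    int b = code_bitmap[offset >> 3] >> (offset & 7);
    return b & ((1 << len) - 1);
}
#endif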
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No previous large page.  */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
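
/* Added note (not original code): a sketch of the mask-widening loop above.
 * If the tracked region is 0x10000000/0xffff0000 and a new large page at
 * 0x10230000 arrives, the mask is shifted left until the XOR of the two
 * addresses fits under it, ending at 0xffc00000 and covering both pages.
 * Illustrative helper, kept out of the build: */
#if 0
static target_ulong example_widen_mask(target_ulong tracked,
                                       target_ulong vaddr,
                                       target_ulong mask)
{
    /* widen the mask until both addresses fall in the same covered region */
    while (((tracked ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    return mask;
}
#endif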
static bool is_ram_rom(ram_addr_t pd)
{
    pd &= ~TARGET_PAGE_MASK;
    return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
}

static bool is_romd(ram_addr_t pd)
{
    MemoryRegion *mr;

    pd &= ~TARGET_PAGE_MASK;
    mr = io_mem_region[pd];
    return mr->rom_device && mr->readable;
}

static bool is_ram_rom_romd(ram_addr_t pd)
{
    return is_ram_rom(pd) || is_romd(pd);
}
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if (!is_ram_rom_romd(pd)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if (is_ram_rom(pd)) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
            iotlb |= io_mem_notdirty.ram_addr;
        else
            iotlb |= io_mem_rom.ram_addr;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p.region_offset;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch.ram_addr + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
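
/* Added note (not original code): how the per-entry 'addend' stored above is
 * used on a TLB hit.  For a RAM page, tlb_set_page() records
 * addend = host_ram_ptr - vaddr, so the host address is simply the guest
 * virtual address plus the addend.  Illustrative helper, kept out of the
 * build: */
#if 0
static inline void *example_tlb_translate(CPUTLBEntry *te, target_ulong vaddr)
{
    return (void *)(unsigned long)(vaddr + te->addend);
}
#endif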
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    ram_addr_t phys_offset = section->mr->ram_addr;
    ram_addr_t region_offset = section->offset_within_region;
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    if (memory_region_is_ram(section->mr)) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    if (readonly) {
        phys_offset |= io_mem_rom.ram_addr;
    }

    if (phys_offset == io_mem_unassigned.ram_addr) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
        if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;
            MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(mr->subpage)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = container_of(mr, subpage_t, iomem);
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                p->region_offset = region_offset;
                if (is_ram_rom_romd(phys_offset))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if (is_ram_rom_romd(phys_offset)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset,
                                           io_mem_unassigned.ram_addr,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
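/* Worked example (illustrative assumptions: 4 KiB pages) of the subpage
 * split performed above.  start_addr2/end_addr2 are offsets within the
 * first/last page, exactly as CHECK_SUBPAGE computes them; the values
 * below are hypothetical inputs, not real registrations.
 */
#if 0
    /* start_addr = 0x1080, orig_size = 0x2000:
       - first page 0x1000:  start_addr2 = 0x080, end_addr2 = 0xfff (subpage)
       - middle page 0x2000: covered whole, no subpage needed
       - last page 0x3000:   start_addr2 = 0x000, end_addr2 = 0x07f (subpage) */
    target_phys_addr_t start_addr2 = start_addr & ~TARGET_PAGE_MASK;          /* 0x080 */
    target_phys_addr_t end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; /* 0x07f */
#endif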
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
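/* Self-contained sketch (Linux-only, illustrative) of the hugetlbfs probe
 * used by gethugepagesize() above: statfs() the path, check the filesystem
 * magic, and use f_bsize as the huge page size.  The function name is an
 * assumption for the example; error handling is trimmed.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <sys/vfs.h>

static long example_hugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);            /* retry if interrupted */
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        return 0;
    }
    if (fs.f_type != 0x958458f6) {          /* HUGETLBFS_MAGIC */
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }
    return fs.f_bsize;                      /* huge page size in bytes */
}
#endif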
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
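/* Worked example of find_ram_offset() above (values are assumptions): with
 * existing blocks at [0x0, 0x08000000) and [0x10000000, 0x18000000), a
 * request for 0x04000000 bytes fits in the 0x08000000-byte gap starting at
 * offset 0x08000000; that gap is also the smallest one large enough, so
 * 0x08000000 is returned.
 */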
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read(idx, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write(idx, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        memory = io_mem_subpage_ram.ram_addr;
    }
    memory &= IO_MEM_NB_ENTRIES - 1;
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
    subpage_memory = mmio->iomem.ram_addr;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
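/* Illustrative sketch: SUBPAGE_IDX() keeps only the offset inside the page,
 * which indexes the per-byte sub_io_index/region_offset tables filled in by
 * subpage_register() above.  The address value is an assumption for a
 * 4 KiB page, not taken from a real registration.
 */
#if 0
    unsigned int idx = SUBPAGE_IDX(0x12345678);   /* 0x678 with 4 KiB pages */
    /* a subpage access at that address is then forwarded to io_mem_read()/
       io_mem_write() using the io index and region offset registered for
       byte 0x678 of the page */
#endif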
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
{
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    io_mem_region[io_index] = mr;

    return io_index;
}

int cpu_register_io_memory(MemoryRegion *mr)
{
    return cpu_register_io_memory_fixed(0, mr);
}

void cpu_unregister_io_memory(int io_index)
{
    io_mem_region[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    /* Must be first: */
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    assert(io_mem_ram.ram_addr == 0);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, false);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&section->mr->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static MemoryListener core_memory_listener = {
    .region_add = core_region_add,
    .region_del = core_region_del,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
};
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
                target_phys_addr_t addr1;
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(pd)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
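/* The chunking in cpu_physical_memory_rw() above never lets an access cross
 * a page boundary.  A standalone sketch of that loop shape (declarations of
 * addr/buf/len omitted; page size taken from the target macros):
 */
#if 0
    while (len > 0) {
        target_phys_addr_t page = addr & TARGET_PAGE_MASK;
        int l = (page + TARGET_PAGE_SIZE) - addr;   /* bytes left in this page */
        if (l > len) {
            l = len;
        }
        /* ... access l bytes at addr (I/O dispatch or memcpy to/from RAM) ... */
        len -= l;
        buf += l;
        addr += l;
    }
#endif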
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (!is_ram_rom_romd(pd)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
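/* Illustrative usage of the map-client API above.  The callback and the
 * dev_state pointer are placeholders for this sketch, not real QEMU
 * symbols: a device whose cpu_physical_memory_map() attempt failed can
 * register a callback and retry when the bounce buffer is released.
 */
#if 0
static void example_retry_map(void *opaque)
{
    /* the bounce buffer was freed: try cpu_physical_memory_map() again */
}

    void *client = cpu_register_map_client(dev_state, example_retry_map);
    /* ... later, if the retry is no longer needed: */
    cpu_unregister_map_client(client);
#endif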
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    PhysPageDesc p;
    ram_addr_t pd;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
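/* The *_le_/*_be_ accessors above differ from the native ones only in a
 * byte swap when the requested endianness does not match the target's.  A
 * minimal, generic sketch of that swap for 16-bit values (plain C, not
 * QEMU's bswap helpers):
 */
#if 0
static inline uint16_t example_bswap16(uint16_t v)
{
    return (uint16_t)((v >> 8) | (v << 8));   /* 0x1234 -> 0x3412 */
}
#endif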
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
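/* Illustrative call (not part of the build): dump_exec_info() backs the
 * monitor's "info jit" output, but it can also be invoked directly for
 * debugging.  The cast is a sketch-level assumption; the exact signature
 * of fprintf_function may make it unnecessary.
 */
#if 0
    dump_exec_info(stderr, (fprintf_function)fprintf);
#endif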
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
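/* Worked example of the direct-mapped TLB index computed above (assumed
 * values: TARGET_PAGE_BITS = 12, CPU_TLB_SIZE = 256):
 */
#if 0
    /* addr = 0x00403abc:
       page_index = (0x00403abc >> 12) & 0xff = 0x403 & 0xff = 0x03 */
#endif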
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env