/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
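
/* Worked example (added commentary, illustrative parameter values): assuming
 * a user-mode guest with L1_MAP_ADDR_SPACE_BITS = 47, TARGET_PAGE_BITS = 12
 * and L2_BITS = 10, there are 47 - 12 = 35 bits to resolve above the page
 * offset.  35 % 10 = 5, so V_L1_BITS_REM = 5 >= 4 and V_L1_BITS = 5: the top
 * level l1_map has V_L1_SIZE = 32 entries, V_L1_SHIFT = 30, and the remaining
 * 30 bits are handled by three lower levels of 1024 entries each.
 */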

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;
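
/* Note (added commentary): phys_map is the root of a radix tree over guest
 * physical page numbers.  Interior nodes are rows of phys_map_nodes (L2_SIZE
 * entries each) referenced through the 15-bit 'ptr' field; a leaf entry
 * instead stores an index into phys_sections, i.e. the MemoryRegionSection
 * covering that page.  PHYS_MAP_NODE_NIL marks a subtree that has never been
 * populated and therefore resolves to the unassigned section.
 */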

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
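
/* Note (added commentary): page_find() is the read-only variant - with
 * alloc == 0 the walk above bails out with NULL as soon as a missing level
 * is hit, so merely looking up a page never allocates intermediate tables.
 */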

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
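
/* Note (added commentary): phys_page_set_level() handles one radix level per
 * call.  A run of pages that is both aligned to 'step' (the span of a single
 * entry at this level) and at least 'step' long is recorded as a leaf
 * directly at this level; anything smaller or misaligned recurses one level
 * down.  phys_page_set() therefore only needs a handful of nodes per level,
 * which is why the "wildly overreserve" estimate is good enough.
 */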

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
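
/* Note (added commentary): with the defaults above, a user-mode build that
 * passes tb_size == 0 simply gets DEFAULT_CODE_GEN_BUFFER_SIZE (32 MB, unless
 * the host's MAX_CODE_GEN_BUFFER_SIZE is smaller), while a system build asks
 * for a quarter of guest RAM; either request is then clamped to the
 * [MIN_CODE_GEN_BUFFER_SIZE, MAX_CODE_GEN_BUFFER_SIZE] range.
 */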

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
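
/* Note (added commentary): depending on the build, alloc_code_gen_buffer()
 * above either reuses the static array (user-mode builds that define
 * USE_STATIC_CODE_GEN_BUFFER), mmap()s an anonymous RWX region at a
 * host-specific preferred address, or falls back to g_malloc() plus
 * map_exec() to flip the allocation executable.
 */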

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
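
/* Usage sketch (added commentary, illustrative values): a front end calls
 * tcg_exec_init(0) once, before any CPU is created, to get the default
 * translation-buffer size, or passes an explicit byte count such as
 * tcg_exec_init(64 * 1024 * 1024).  Afterwards tcg_enabled() reports whether
 * a code generation buffer exists, i.e. whether TCG is in use at all.
 */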
649 void cpu_exec_init_all(void)
651 #if !defined(CONFIG_USER_ONLY)
657 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
659 static int cpu_common_post_load(void *opaque
, int version_id
)
661 CPUArchState
*env
= opaque
;
663 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
664 version_id is increased. */
665 env
->interrupt_request
&= ~0x01;
671 static const VMStateDescription vmstate_cpu_common
= {
672 .name
= "cpu_common",
674 .minimum_version_id
= 1,
675 .minimum_version_id_old
= 1,
676 .post_load
= cpu_common_post_load
,
677 .fields
= (VMStateField
[]) {
678 VMSTATE_UINT32(halted
, CPUArchState
),
679 VMSTATE_UINT32(interrupt_request
, CPUArchState
),
680 VMSTATE_END_OF_LIST()
685 CPUArchState
*qemu_get_cpu(int cpu
)
687 CPUArchState
*env
= first_cpu
;
690 if (env
->cpu_index
== cpu
)
698 void cpu_exec_init(CPUArchState
*env
)
703 #if defined(CONFIG_USER_ONLY)
706 env
->next_cpu
= NULL
;
709 while (*penv
!= NULL
) {
710 penv
= &(*penv
)->next_cpu
;
713 env
->cpu_index
= cpu_index
;
715 QTAILQ_INIT(&env
->breakpoints
);
716 QTAILQ_INIT(&env
->watchpoints
);
717 #ifndef CONFIG_USER_ONLY
718 env
->thread_id
= qemu_get_thread_id();
721 #if defined(CONFIG_USER_ONLY)
724 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
725 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
726 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
727 cpu_save
, cpu_load
, env
);

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
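
/* Note (added commentary): when tb_alloc() returns NULL the caller
 * (tb_gen_code) flushes the whole translation buffer and retries, which is
 * why the retry path there is annotated "cannot fail at this point".
 */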
757 static inline void invalidate_page_bitmap(PageDesc
*p
)
759 if (p
->code_bitmap
) {
760 g_free(p
->code_bitmap
);
761 p
->code_bitmap
= NULL
;
763 p
->code_write_count
= 0;
766 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
768 static void page_flush_tb_1 (int level
, void **lp
)
777 for (i
= 0; i
< L2_SIZE
; ++i
) {
778 pd
[i
].first_tb
= NULL
;
779 invalidate_page_bitmap(pd
+ i
);
783 for (i
= 0; i
< L2_SIZE
; ++i
) {
784 page_flush_tb_1 (level
- 1, pp
+ i
);
789 static void page_flush_tb(void)
792 for (i
= 0; i
< V_L1_SIZE
; i
++) {
793 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
797 /* flush all the translation blocks */
798 /* XXX: tb_flush is currently not thread safe */
799 void tb_flush(CPUArchState
*env1
)
802 #if defined(DEBUG_FLUSH)
803 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
804 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
806 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
808 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
809 cpu_abort(env1
, "Internal error: code buffer overflow\n");
813 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
814 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
817 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
820 code_gen_ptr
= code_gen_buffer
;
821 /* XXX: flush processor icache at this point if cache flush is
826 #ifdef DEBUG_TB_CHECK
828 static void tb_invalidate_check(target_ulong address
)
830 TranslationBlock
*tb
;
832 address
&= TARGET_PAGE_MASK
;
833 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
834 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
835 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
836 address
>= tb
->pc
+ tb
->size
)) {
837 printf("ERROR invalidate: address=" TARGET_FMT_lx
838 " PC=%08lx size=%04x\n",
839 address
, (long)tb
->pc
, tb
->size
);
845 /* verify that all the pages have correct rights for code */
846 static void tb_page_check(void)
848 TranslationBlock
*tb
;
849 int i
, flags1
, flags2
;
851 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
852 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
853 flags1
= page_get_flags(tb
->pc
);
854 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
855 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
856 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
857 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
865 /* invalidate one TB */
866 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
869 TranslationBlock
*tb1
;
873 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
876 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
880 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
882 TranslationBlock
*tb1
;
887 n1
= (uintptr_t)tb1
& 3;
888 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
890 *ptb
= tb1
->page_next
[n1
];
893 ptb
= &tb1
->page_next
[n1
];
897 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
899 TranslationBlock
*tb1
, **ptb
;
902 ptb
= &tb
->jmp_next
[n
];
905 /* find tb(n) in circular list */
908 n1
= (uintptr_t)tb1
& 3;
909 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
910 if (n1
== n
&& tb1
== tb
)
913 ptb
= &tb1
->jmp_first
;
915 ptb
= &tb1
->jmp_next
[n1
];
918 /* now we can suppress tb(n) from the list */
919 *ptb
= tb
->jmp_next
[n
];
921 tb
->jmp_next
[n
] = NULL
;
925 /* reset the jump entry 'n' of a TB so that it is not chained to
927 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
929 tb_set_jmp_target(tb
, n
, (uintptr_t)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
932 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
937 tb_page_addr_t phys_pc
;
938 TranslationBlock
*tb1
, *tb2
;
940 /* remove the TB from the hash list */
941 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
942 h
= tb_phys_hash_func(phys_pc
);
943 tb_remove(&tb_phys_hash
[h
], tb
,
944 offsetof(TranslationBlock
, phys_hash_next
));
946 /* remove the TB from the page list */
947 if (tb
->page_addr
[0] != page_addr
) {
948 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
949 tb_page_remove(&p
->first_tb
, tb
);
950 invalidate_page_bitmap(p
);
952 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
953 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
954 tb_page_remove(&p
->first_tb
, tb
);
955 invalidate_page_bitmap(p
);
958 tb_invalidated_flag
= 1;
960 /* remove the TB from the hash list */
961 h
= tb_jmp_cache_hash_func(tb
->pc
);
962 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
963 if (env
->tb_jmp_cache
[h
] == tb
)
964 env
->tb_jmp_cache
[h
] = NULL
;
967 /* suppress this TB from the two jump lists */
968 tb_jmp_remove(tb
, 0);
969 tb_jmp_remove(tb
, 1);
971 /* suppress any remaining jumps to this TB */
974 n1
= (uintptr_t)tb1
& 3;
977 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
978 tb2
= tb1
->jmp_next
[n1
];
979 tb_reset_jump(tb1
, n1
);
980 tb1
->jmp_next
[n1
] = NULL
;
983 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2); /* fail safe */
985 tb_phys_invalidate_count
++;
988 static inline void set_bits(uint8_t *tab
, int start
, int len
)
994 mask
= 0xff << (start
& 7);
995 if ((start
& ~7) == (end
& ~7)) {
997 mask
&= ~(0xff << (end
& 7));
1002 start
= (start
+ 8) & ~7;
1004 while (start
< end1
) {
1009 mask
= ~(0xff << (end
& 7));
1015 static void build_page_bitmap(PageDesc
*p
)
1017 int n
, tb_start
, tb_end
;
1018 TranslationBlock
*tb
;
1020 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
1023 while (tb
!= NULL
) {
1024 n
= (uintptr_t)tb
& 3;
1025 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1026 /* NOTE: this is subtle as a TB may span two physical pages */
1028 /* NOTE: tb_end may be after the end of the page, but
1029 it is not a problem */
1030 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
1031 tb_end
= tb_start
+ tb
->size
;
1032 if (tb_end
> TARGET_PAGE_SIZE
)
1033 tb_end
= TARGET_PAGE_SIZE
;
1036 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1038 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
1039 tb
= tb
->page_next
[n
];
1043 TranslationBlock
*tb_gen_code(CPUArchState
*env
,
1044 target_ulong pc
, target_ulong cs_base
,
1045 int flags
, int cflags
)
1047 TranslationBlock
*tb
;
1049 tb_page_addr_t phys_pc
, phys_page2
;
1050 target_ulong virt_page2
;
1053 phys_pc
= get_page_addr_code(env
, pc
);
1056 /* flush must be done */
1058 /* cannot fail at this point */
1060 /* Don't forget to invalidate previous TB info. */
1061 tb_invalidated_flag
= 1;
1063 tc_ptr
= code_gen_ptr
;
1064 tb
->tc_ptr
= tc_ptr
;
1065 tb
->cs_base
= cs_base
;
1067 tb
->cflags
= cflags
;
1068 cpu_gen_code(env
, tb
, &code_gen_size
);
1069 code_gen_ptr
= (void *)(((uintptr_t)code_gen_ptr
+ code_gen_size
+
1070 CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1072 /* check next page if needed */
1073 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1075 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1076 phys_page2
= get_page_addr_code(env
, virt_page2
);
1078 tb_link_page(tb
, phys_pc
, phys_page2
);

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
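
/* Note (added commentary): this wrapper advances page by page because the
 * per-page variant below documents that its start and end must refer to the
 * *same* physical page; the multi-page case is reduced to one call per page.
 */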
1100 * Invalidate all TBs which intersect with the target physical address range
1101 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1102 * 'is_cpu_write_access' should be true if called from a real cpu write
1103 * access: the virtual CPU will exit the current TB if code is modified inside
1106 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1107 int is_cpu_write_access
)
1109 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1110 CPUArchState
*env
= cpu_single_env
;
1111 tb_page_addr_t tb_start
, tb_end
;
1114 #ifdef TARGET_HAS_PRECISE_SMC
1115 int current_tb_not_found
= is_cpu_write_access
;
1116 TranslationBlock
*current_tb
= NULL
;
1117 int current_tb_modified
= 0;
1118 target_ulong current_pc
= 0;
1119 target_ulong current_cs_base
= 0;
1120 int current_flags
= 0;
1121 #endif /* TARGET_HAS_PRECISE_SMC */
1123 p
= page_find(start
>> TARGET_PAGE_BITS
);
1126 if (!p
->code_bitmap
&&
1127 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1128 is_cpu_write_access
) {
1129 /* build code bitmap */
1130 build_page_bitmap(p
);
1133 /* we remove all the TBs in the range [start, end[ */
1134 /* XXX: see if in some cases it could be faster to invalidate all the code */
1136 while (tb
!= NULL
) {
1137 n
= (uintptr_t)tb
& 3;
1138 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1139 tb_next
= tb
->page_next
[n
];
1140 /* NOTE: this is subtle as a TB may span two physical pages */
1142 /* NOTE: tb_end may be after the end of the page, but
1143 it is not a problem */
1144 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1145 tb_end
= tb_start
+ tb
->size
;
1147 tb_start
= tb
->page_addr
[1];
1148 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1150 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1151 #ifdef TARGET_HAS_PRECISE_SMC
1152 if (current_tb_not_found
) {
1153 current_tb_not_found
= 0;
1155 if (env
->mem_io_pc
) {
1156 /* now we have a real cpu fault */
1157 current_tb
= tb_find_pc(env
->mem_io_pc
);
1160 if (current_tb
== tb
&&
1161 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1162 /* If we are modifying the current TB, we must stop
1163 its execution. We could be more precise by checking
1164 that the modification is after the current PC, but it
1165 would require a specialized function to partially
1166 restore the CPU state */
1168 current_tb_modified
= 1;
1169 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1170 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1173 #endif /* TARGET_HAS_PRECISE_SMC */
1174 /* we need to do that to handle the case where a signal
1175 occurs while doing tb_phys_invalidate() */
1178 saved_tb
= env
->current_tb
;
1179 env
->current_tb
= NULL
;
1181 tb_phys_invalidate(tb
, -1);
1183 env
->current_tb
= saved_tb
;
1184 if (env
->interrupt_request
&& env
->current_tb
)
1185 cpu_interrupt(env
, env
->interrupt_request
);
1190 #if !defined(CONFIG_USER_ONLY)
1191 /* if no code remaining, no need to continue to use slow writes */
1193 invalidate_page_bitmap(p
);
1194 if (is_cpu_write_access
) {
1195 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1199 #ifdef TARGET_HAS_PRECISE_SMC
1200 if (current_tb_modified
) {
1201 /* we generate a block containing just the instruction
1202 modifying the memory. It will ensure that it cannot modify
1204 env
->current_tb
= NULL
;
1205 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1206 cpu_resume_from_signal(env
, NULL
);
1211 /* len must be <= 8 and start must be a multiple of len */
1212 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1218 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1219 cpu_single_env
->mem_io_vaddr
, len
,
1220 cpu_single_env
->eip
,
1221 cpu_single_env
->eip
+
1222 (intptr_t)cpu_single_env
->segs
[R_CS
].base
);
1225 p
= page_find(start
>> TARGET_PAGE_BITS
);
1228 if (p
->code_bitmap
) {
1229 offset
= start
& ~TARGET_PAGE_MASK
;
1230 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1231 if (b
& ((1 << len
) - 1))
1235 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1239 #if !defined(CONFIG_SOFTMMU)
1240 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1241 uintptr_t pc
, void *puc
)
1243 TranslationBlock
*tb
;
1246 #ifdef TARGET_HAS_PRECISE_SMC
1247 TranslationBlock
*current_tb
= NULL
;
1248 CPUArchState
*env
= cpu_single_env
;
1249 int current_tb_modified
= 0;
1250 target_ulong current_pc
= 0;
1251 target_ulong current_cs_base
= 0;
1252 int current_flags
= 0;
1255 addr
&= TARGET_PAGE_MASK
;
1256 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1260 #ifdef TARGET_HAS_PRECISE_SMC
1261 if (tb
&& pc
!= 0) {
1262 current_tb
= tb_find_pc(pc
);
1265 while (tb
!= NULL
) {
1266 n
= (uintptr_t)tb
& 3;
1267 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1268 #ifdef TARGET_HAS_PRECISE_SMC
1269 if (current_tb
== tb
&&
1270 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1271 /* If we are modifying the current TB, we must stop
1272 its execution. We could be more precise by checking
1273 that the modification is after the current PC, but it
1274 would require a specialized function to partially
1275 restore the CPU state */
1277 current_tb_modified
= 1;
1278 cpu_restore_state(current_tb
, env
, pc
);
1279 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1282 #endif /* TARGET_HAS_PRECISE_SMC */
1283 tb_phys_invalidate(tb
, addr
);
1284 tb
= tb
->page_next
[n
];
1287 #ifdef TARGET_HAS_PRECISE_SMC
1288 if (current_tb_modified
) {
1289 /* we generate a block containing just the instruction
1290 modifying the memory. It will ensure that it cannot modify
1292 env
->current_tb
= NULL
;
1293 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1294 cpu_resume_from_signal(env
, puc
);
1300 /* add the tb in the target page and protect it if necessary */
1301 static inline void tb_alloc_page(TranslationBlock
*tb
,
1302 unsigned int n
, tb_page_addr_t page_addr
)
1305 #ifndef CONFIG_USER_ONLY
1306 bool page_already_protected
;
1309 tb
->page_addr
[n
] = page_addr
;
1310 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1311 tb
->page_next
[n
] = p
->first_tb
;
1312 #ifndef CONFIG_USER_ONLY
1313 page_already_protected
= p
->first_tb
!= NULL
;
1315 p
->first_tb
= (TranslationBlock
*)((uintptr_t)tb
| n
);
1316 invalidate_page_bitmap(p
);
1318 #if defined(TARGET_HAS_SMC) || 1
1320 #if defined(CONFIG_USER_ONLY)
1321 if (p
->flags
& PAGE_WRITE
) {
1326 /* force the host page as non writable (writes will have a
1327 page fault + mprotect overhead) */
1328 page_addr
&= qemu_host_page_mask
;
1330 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1331 addr
+= TARGET_PAGE_SIZE
) {
1333 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1337 p2
->flags
&= ~PAGE_WRITE
;
1339 mprotect(g2h(page_addr
), qemu_host_page_size
,
1340 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1341 #ifdef DEBUG_TB_INVALIDATE
1342 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1347 /* if some code is already present, then the pages are already
1348 protected. So we handle the case where only the first TB is
1349 allocated in a physical page */
1350 if (!page_already_protected
) {
1351 tlb_protect_code(page_addr
);
1355 #endif /* TARGET_HAS_SMC */
1358 /* add a new TB and link it to the physical page tables. phys_page2 is
1359 (-1) to indicate that only one page contains the TB. */
1360 void tb_link_page(TranslationBlock
*tb
,
1361 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1364 TranslationBlock
**ptb
;
1366 /* Grab the mmap lock to stop another thread invalidating this TB
1367 before we are done. */
1369 /* add in the physical hash table */
1370 h
= tb_phys_hash_func(phys_pc
);
1371 ptb
= &tb_phys_hash
[h
];
1372 tb
->phys_hash_next
= *ptb
;
1375 /* add in the page list */
1376 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1377 if (phys_page2
!= -1)
1378 tb_alloc_page(tb
, 1, phys_page2
);
1380 tb
->page_addr
[1] = -1;
1382 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2);
1383 tb
->jmp_next
[0] = NULL
;
1384 tb
->jmp_next
[1] = NULL
;
1386 /* init original jump addresses */
1387 if (tb
->tb_next_offset
[0] != 0xffff)
1388 tb_reset_jump(tb
, 0);
1389 if (tb
->tb_next_offset
[1] != 0xffff)
1390 tb_reset_jump(tb
, 1);
1392 #ifdef DEBUG_TB_CHECK
1398 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1399 tb[1].tc_ptr. Return NULL if not found */
1400 TranslationBlock
*tb_find_pc(uintptr_t tc_ptr
)
1402 int m_min
, m_max
, m
;
1404 TranslationBlock
*tb
;
1408 if (tc_ptr
< (uintptr_t)code_gen_buffer
||
1409 tc_ptr
>= (uintptr_t)code_gen_ptr
) {
1412 /* binary search (cf Knuth) */
1415 while (m_min
<= m_max
) {
1416 m
= (m_min
+ m_max
) >> 1;
1418 v
= (uintptr_t)tb
->tc_ptr
;
1421 else if (tc_ptr
< v
) {
1430 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1432 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1434 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1437 tb1
= tb
->jmp_next
[n
];
1439 /* find head of list */
1441 n1
= (uintptr_t)tb1
& 3;
1442 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1445 tb1
= tb1
->jmp_next
[n1
];
1447 /* we are now sure now that tb jumps to tb1 */
1450 /* remove tb from the jmp_first list */
1451 ptb
= &tb_next
->jmp_first
;
1454 n1
= (uintptr_t)tb1
& 3;
1455 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1456 if (n1
== n
&& tb1
== tb
)
1458 ptb
= &tb1
->jmp_next
[n1
];
1460 *ptb
= tb
->jmp_next
[n
];
1461 tb
->jmp_next
[n
] = NULL
;
1463 /* suppress the jump to next tb in generated code */
1464 tb_reset_jump(tb
, n
);
1466 /* suppress jumps in the tb on which we could have jumped */
1467 tb_reset_jump_recursive(tb_next
);
1471 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1473 tb_reset_jump_recursive2(tb
, 0);
1474 tb_reset_jump_recursive2(tb
, 1);
1477 #if defined(TARGET_HAS_ICE)
1478 #if defined(CONFIG_USER_ONLY)
1479 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
1481 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1484 void tb_invalidate_phys_addr(target_phys_addr_t addr
)
1486 ram_addr_t ram_addr
;
1487 MemoryRegionSection
*section
;
1489 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1490 if (!(memory_region_is_ram(section
->mr
)
1491 || (section
->mr
->rom_device
&& section
->mr
->readable
))) {
1494 ram_addr
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1495 + memory_region_section_addr(section
, addr
);
1496 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1499 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
1501 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env
, pc
) |
1502 (pc
& ~TARGET_PAGE_MASK
));
1505 #endif /* TARGET_HAS_ICE */
1507 #if defined(CONFIG_USER_ONLY)
1508 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1513 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1514 int flags
, CPUWatchpoint
**watchpoint
)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
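
/* Note (added commentary): len must be a power of two, so len_mask = ~(len-1)
 * clears exactly log2(len) low address bits.  For example len == 4 gives
 * len_mask == ~3: the watchpoint covers the four bytes starting at addr, and
 * the (addr & ~len_mask) check above enforces that addr is 4-byte aligned.
 */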
1552 /* Remove a specific watchpoint. */
1553 int cpu_watchpoint_remove(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1556 target_ulong len_mask
= ~(len
- 1);
1559 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1560 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1561 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1562 cpu_watchpoint_remove_by_ref(env
, wp
);
1569 /* Remove a specific watchpoint by reference. */
1570 void cpu_watchpoint_remove_by_ref(CPUArchState
*env
, CPUWatchpoint
*watchpoint
)
1572 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1574 tlb_flush_page(env
, watchpoint
->vaddr
);
1579 /* Remove all matching watchpoints. */
1580 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1582 CPUWatchpoint
*wp
, *next
;
1584 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1585 if (wp
->flags
& mask
)
1586 cpu_watchpoint_remove_by_ref(env
, wp
);
1591 /* Add a breakpoint. */
1592 int cpu_breakpoint_insert(CPUArchState
*env
, target_ulong pc
, int flags
,
1593 CPUBreakpoint
**breakpoint
)
1595 #if defined(TARGET_HAS_ICE)
1598 bp
= g_malloc(sizeof(*bp
));
1603 /* keep all GDB-injected breakpoints in front */
1605 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1607 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1609 breakpoint_invalidate(env
, pc
);
1619 /* Remove a specific breakpoint. */
1620 int cpu_breakpoint_remove(CPUArchState
*env
, target_ulong pc
, int flags
)
1622 #if defined(TARGET_HAS_ICE)
1625 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1626 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1627 cpu_breakpoint_remove_by_ref(env
, bp
);
1637 /* Remove a specific breakpoint by reference. */
1638 void cpu_breakpoint_remove_by_ref(CPUArchState
*env
, CPUBreakpoint
*breakpoint
)
1640 #if defined(TARGET_HAS_ICE)
1641 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1643 breakpoint_invalidate(env
, breakpoint
->pc
);
1649 /* Remove all matching breakpoints. */
1650 void cpu_breakpoint_remove_all(CPUArchState
*env
, int mask
)
1652 #if defined(TARGET_HAS_ICE)
1653 CPUBreakpoint
*bp
, *next
;
1655 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1656 if (bp
->flags
& mask
)
1657 cpu_breakpoint_remove_by_ref(env
, bp
);
1662 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1663 CPU loop after each instruction */
1664 void cpu_single_step(CPUArchState
*env
, int enabled
)
1666 #if defined(TARGET_HAS_ICE)
1667 if (env
->singlestep_enabled
!= enabled
) {
1668 env
->singlestep_enabled
= enabled
;
1670 kvm_update_guest_debug(env
, 0);
1672 /* must flush all the translated code to avoid inconsistencies */
1673 /* XXX: only flush what is necessary */
1680 static void cpu_unlink_tb(CPUArchState
*env
)
1682 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1683 problem and hope the cpu will stop of its own accord. For userspace
1684 emulation this often isn't actually as bad as it sounds. Often
1685 signals are used primarily to interrupt blocking syscalls. */
1686 TranslationBlock
*tb
;
1687 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1689 spin_lock(&interrupt_lock
);
1690 tb
= env
->current_tb
;
1691 /* if the cpu is currently executing code, we must unlink it and
1692 all the potentially executing TB */
1694 env
->current_tb
= NULL
;
1695 tb_reset_jump_recursive(tb
);
1697 spin_unlock(&interrupt_lock
);
1700 #ifndef CONFIG_USER_ONLY
1701 /* mask must never be zero, except for A20 change call */
1702 static void tcg_handle_interrupt(CPUArchState
*env
, int mask
)
1706 old_mask
= env
->interrupt_request
;
1707 env
->interrupt_request
|= mask
;
1710 * If called from iothread context, wake the target cpu in
1713 if (!qemu_cpu_is_self(env
)) {
1719 env
->icount_decr
.u16
.high
= 0xffff;
1721 && (mask
& ~old_mask
) != 0) {
1722 cpu_abort(env
, "Raised interrupt while not in I/O function");
1729 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1731 #else /* CONFIG_USER_ONLY */
1733 void cpu_interrupt(CPUArchState
*env
, int mask
)
1735 env
->interrupt_request
|= mask
;
1738 #endif /* CONFIG_USER_ONLY */
1740 void cpu_reset_interrupt(CPUArchState
*env
, int mask
)
1742 env
->interrupt_request
&= ~mask
;
1745 void cpu_exit(CPUArchState
*env
)
1747 env
->exit_request
= 1;
1751 void cpu_abort(CPUArchState
*env
, const char *fmt
, ...)
1758 fprintf(stderr
, "qemu: fatal: ");
1759 vfprintf(stderr
, fmt
, ap
);
1760 fprintf(stderr
, "\n");
1761 cpu_dump_state(env
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
1762 if (qemu_log_enabled()) {
1763 qemu_log("qemu: fatal: ");
1764 qemu_log_vprintf(fmt
, ap2
);
1766 log_cpu_state(env
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
1772 #if defined(CONFIG_USER_ONLY)
1774 struct sigaction act
;
1775 sigfillset(&act
.sa_mask
);
1776 act
.sa_handler
= SIG_DFL
;
1777 sigaction(SIGABRT
, &act
, NULL
);
1783 CPUArchState
*cpu_copy(CPUArchState
*env
)
1785 CPUArchState
*new_env
= cpu_init(env
->cpu_model_str
);
1786 CPUArchState
*next_cpu
= new_env
->next_cpu
;
1787 int cpu_index
= new_env
->cpu_index
;
1788 #if defined(TARGET_HAS_ICE)
1793 memcpy(new_env
, env
, sizeof(CPUArchState
));
1795 /* Preserve chaining and index. */
1796 new_env
->next_cpu
= next_cpu
;
1797 new_env
->cpu_index
= cpu_index
;
1799 /* Clone all break/watchpoints.
1800 Note: Once we support ptrace with hw-debug register access, make sure
1801 BP_CPU break/watchpoints are handled correctly on clone. */
1802 QTAILQ_INIT(&env
->breakpoints
);
1803 QTAILQ_INIT(&env
->watchpoints
);
1804 #if defined(TARGET_HAS_ICE)
1805 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1806 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1808 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1809 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1817 #if !defined(CONFIG_USER_ONLY)
1818 void tb_flush_jmp_cache(CPUArchState
*env
, target_ulong addr
)
1822 /* Discard jump cache entries for any tb which might potentially
1823 overlap the flushed page. */
1824 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1825 memset (&env
->tb_jmp_cache
[i
], 0,
1826 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1828 i
= tb_jmp_cache_hash_page(addr
);
1829 memset (&env
->tb_jmp_cache
[i
], 0,
1830 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1833 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t end
,
1838 /* we modify the TLB cache so that the dirty bit will be set again
1839 when accessing the range */
1840 start1
= (uintptr_t)qemu_safe_ram_ptr(start
);
1841 /* Check that we don't span multiple blocks - this breaks the
1842 address comparisons below. */
1843 if ((uintptr_t)qemu_safe_ram_ptr(end
- 1) - start1
1844 != (end
- 1) - start
) {
1847 cpu_tlb_reset_dirty_all(start1
, length
);
1851 /* Note: start and end must be within the same ram block. */
1852 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1857 start
&= TARGET_PAGE_MASK
;
1858 end
= TARGET_PAGE_ALIGN(end
);
1860 length
= end
- start
;
1863 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
1865 if (tcg_enabled()) {
1866 tlb_reset_dirty_range_all(start
, end
, length
);
1870 int cpu_physical_memory_set_dirty_tracking(int enable
)
1873 in_migration
= enable
;
1877 target_phys_addr_t
memory_region_section_get_iotlb(CPUArchState
*env
,
1878 MemoryRegionSection
*section
,
1880 target_phys_addr_t paddr
,
1882 target_ulong
*address
)
1884 target_phys_addr_t iotlb
;
1887 if (memory_region_is_ram(section
->mr
)) {
1889 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1890 + memory_region_section_addr(section
, paddr
);
1891 if (!section
->readonly
) {
1892 iotlb
|= phys_section_notdirty
;
1894 iotlb
|= phys_section_rom
;
1897 /* IO handlers are currently passed a physical address.
1898 It would be nice to pass an offset from the base address
1899 of that region. This would avoid having to special case RAM,
1900 and avoid full address decoding in every device.
1901 We can't use the high bits of pd for this because
1902 IO_MEM_ROMD uses these as a ram address. */
1903 iotlb
= section
- phys_sections
;
1904 iotlb
+= memory_region_section_addr(section
, paddr
);
1907 /* Make accesses to pages with watchpoints go via the
1908 watchpoint trap routines. */
1909 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1910 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
1911 /* Avoid trapping reads of pages with a write breakpoint. */
1912 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1913 iotlb
= phys_section_watch
+ paddr
;
1914 *address
|= TLB_MMIO
;
1925 * Walks guest process memory "regions" one by one
1926 * and calls callback function 'fn' for each region.
1929 struct walk_memory_regions_data
1931 walk_memory_regions_fn fn
;
1937 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
1938 abi_ulong end
, int new_prot
)
1940 if (data
->start
!= -1ul) {
1941 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
1947 data
->start
= (new_prot
? end
: -1ul);
1948 data
->prot
= new_prot
;
1953 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
1954 abi_ulong base
, int level
, void **lp
)
1960 return walk_memory_regions_end(data
, base
, 0);
1965 for (i
= 0; i
< L2_SIZE
; ++i
) {
1966 int prot
= pd
[i
].flags
;
1968 pa
= base
| (i
<< TARGET_PAGE_BITS
);
1969 if (prot
!= data
->prot
) {
1970 rc
= walk_memory_regions_end(data
, pa
, prot
);
1978 for (i
= 0; i
< L2_SIZE
; ++i
) {
1979 pa
= base
| ((abi_ulong
)i
<<
1980 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
1981 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
1991 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
1993 struct walk_memory_regions_data data
;
2001 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2002 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2003 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2009 return walk_memory_regions_end(&data
, 0, 0);
2012 static int dump_region(void *priv
, abi_ulong start
,
2013 abi_ulong end
, unsigned long prot
)
2015 FILE *f
= (FILE *)priv
;
2017 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2018 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2019 start
, end
, end
- start
,
2020 ((prot
& PAGE_READ
) ? 'r' : '-'),
2021 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2022 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2027 /* dump memory mappings */
2028 void page_dump(FILE *f
)
2030 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2031 "start", "end", "size", "prot");
2032 walk_memory_regions(f
, dump_region
);
2035 int page_get_flags(target_ulong address
)
2039 p
= page_find(address
>> TARGET_PAGE_BITS
);

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
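
/* Note (added commentary): PAGE_WRITE_ORG records that the page was writable
 * before the translated-code protection machinery stripped PAGE_WRITE;
 * page_unprotect() below relies on it to restore write access once the
 * translated code on the page has been invalidated.
 */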
2083 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2089 /* This function should never be called with addresses outside the
2090 guest address space. If this assert fires, it probably indicates
2091 a missing call to h2g_valid. */
2092 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2093 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2099 if (start
+ len
- 1 < start
) {
2100 /* We've wrapped around. */
2104 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2105 start
= start
& TARGET_PAGE_MASK
;
2107 for (addr
= start
, len
= end
- start
;
2109 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2110 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2113 if( !(p
->flags
& PAGE_VALID
) )
2116 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2118 if (flags
& PAGE_WRITE
) {
2119 if (!(p
->flags
& PAGE_WRITE_ORG
))
2121 /* unprotect the page if it was put read-only because it
2122 contains translated code */
2123 if (!(p
->flags
& PAGE_WRITE
)) {
2124 if (!page_unprotect(addr
, 0, NULL
))
2133 /* called from signal handler: invalidate the code and unprotect the
2134 page. Return TRUE if the fault was successfully handled. */
2135 int page_unprotect(target_ulong address
, uintptr_t pc
, void *puc
)
2139 target_ulong host_start
, host_end
, addr
;
2141 /* Technically this isn't safe inside a signal handler. However we
2142 know this only ever happens in a synchronous SEGV handler, so in
2143 practice it seems to be ok. */
2146 p
= page_find(address
>> TARGET_PAGE_BITS
);
2152 /* if the page was really writable, then we change its
2153 protection back to writable */
2154 if ((p
->flags
& PAGE_WRITE_ORG
) && !(p
->flags
& PAGE_WRITE
)) {
2155 host_start
= address
& qemu_host_page_mask
;
2156 host_end
= host_start
+ qemu_host_page_size
;
2159 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2160 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2161 p
->flags
|= PAGE_WRITE
;
2164 /* and since the content will be modified, we must invalidate
2165 the corresponding translated code. */
2166 tb_invalidate_phys_page(addr
, pc
, puc
);
2167 #ifdef DEBUG_TB_CHECK
2168 tb_invalidate_check(addr
);
2171 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2180 #endif /* defined(CONFIG_USER_ONLY) */
2182 #if !defined(CONFIG_USER_ONLY)
2184 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2185 typedef struct subpage_t
{
2187 target_phys_addr_t base
;
2188 uint16_t sub_section
[TARGET_PAGE_SIZE
];
2191 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2193 static subpage_t
*subpage_init(target_phys_addr_t base
);
2194 static void destroy_page_desc(uint16_t section_index
)
2196 MemoryRegionSection
*section
= &phys_sections
[section_index
];
2197 MemoryRegion
*mr
= section
->mr
;
2200 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
2201 memory_region_destroy(&subpage
->iomem
);
2206 static void destroy_l2_mapping(PhysPageEntry
*lp
, unsigned level
)
2211 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
2215 p
= phys_map_nodes
[lp
->ptr
];
2216 for (i
= 0; i
< L2_SIZE
; ++i
) {
2217 if (!p
[i
].is_leaf
) {
2218 destroy_l2_mapping(&p
[i
], level
- 1);
2220 destroy_page_desc(p
[i
].ptr
);
2224 lp
->ptr
= PHYS_MAP_NODE_NIL
;
2227 static void destroy_all_mappings(void)
2229 destroy_l2_mapping(&phys_map
, P_L2_LEVELS
- 1);
2230 phys_map_nodes_reset();
2233 static uint16_t phys_section_add(MemoryRegionSection
*section
)
2235 if (phys_sections_nb
== phys_sections_nb_alloc
) {
2236 phys_sections_nb_alloc
= MAX(phys_sections_nb_alloc
* 2, 16);
2237 phys_sections
= g_renew(MemoryRegionSection
, phys_sections
,
2238 phys_sections_nb_alloc
);
2240 phys_sections
[phys_sections_nb
] = *section
;
2241 return phys_sections_nb
++;
2244 static void phys_sections_clear(void)
2246 phys_sections_nb
= 0;
2249 static void register_subpage(MemoryRegionSection
*section
)
2252 target_phys_addr_t base
= section
->offset_within_address_space
2254 MemoryRegionSection
*existing
= phys_page_find(base
>> TARGET_PAGE_BITS
);
2255 MemoryRegionSection subsection
= {
2256 .offset_within_address_space
= base
,
2257 .size
= TARGET_PAGE_SIZE
,
2259 target_phys_addr_t start
, end
;
2261 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
2263 if (!(existing
->mr
->subpage
)) {
2264 subpage
= subpage_init(base
);
2265 subsection
.mr
= &subpage
->iomem
;
2266 phys_page_set(base
>> TARGET_PAGE_BITS
, 1,
2267 phys_section_add(&subsection
));
2269 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
2271 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
2272 end
= start
+ section
->size
- 1;
2273 subpage_register(subpage
, start
, end
, phys_section_add(section
));
2277 static void register_multipage(MemoryRegionSection
*section
)
2279 target_phys_addr_t start_addr
= section
->offset_within_address_space
;
2280 ram_addr_t size
= section
->size
;
2281 target_phys_addr_t addr
;
2282 uint16_t section_index
= phys_section_add(section
);
2287 phys_page_set(addr
>> TARGET_PAGE_BITS
, size
>> TARGET_PAGE_BITS
,
2291 void cpu_register_physical_memory_log(MemoryRegionSection
*section
,
2294 MemoryRegionSection now
= *section
, remain
= *section
;
2296 if ((now
.offset_within_address_space
& ~TARGET_PAGE_MASK
)
2297 || (now
.size
< TARGET_PAGE_SIZE
)) {
2298 now
.size
= MIN(TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
2299 - now
.offset_within_address_space
,
2301 register_subpage(&now
);
2302 remain
.size
-= now
.size
;
2303 remain
.offset_within_address_space
+= now
.size
;
2304 remain
.offset_within_region
+= now
.size
;
2306 while (remain
.size
>= TARGET_PAGE_SIZE
) {
2308 if (remain
.offset_within_region
& ~TARGET_PAGE_MASK
) {
2309 now
.size
= TARGET_PAGE_SIZE
;
2310 register_subpage(&now
);
2312 now
.size
&= TARGET_PAGE_MASK
;
2313 register_multipage(&now
);
2315 remain
.size
-= now
.size
;
2316 remain
.offset_within_address_space
+= now
.size
;
2317 remain
.offset_within_region
+= now
.size
;
2321 register_subpage(&now
);
2326 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2329 kvm_coalesce_mmio_region(addr
, size
);
2332 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2335 kvm_uncoalesce_mmio_region(addr
, size
);
2338 void qemu_flush_coalesced_mmio_buffer(void)
2341 kvm_flush_coalesced_mmio_buffer();
2344 #if defined(__linux__) && !defined(TARGET_S390X)
2346 #include <sys/vfs.h>
2348 #define HUGETLBFS_MAGIC 0x958458f6
2350 static long gethugepagesize(const char *path
)
2356 ret
= statfs(path
, &fs
);
2357 } while (ret
!= 0 && errno
== EINTR
);
2364 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2365 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
    int flags;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
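#if 0
/* Worked example (editor's addition, not part of the original file): the
 * rounding used above aligns the requested size up to the hugepage size.
 * With a hypothetical 2 MiB hugepage, a 5 MiB request becomes 6 MiB:
 *   (0x500000 + 0x200000 - 1) & ~(0x200000 - 1) == 0x600000
 */
static ram_addr_t example_round_to_hugepage(ram_addr_t memory,
                                            unsigned long hpagesize)
{
    return (memory + hpagesize - 1) & ~(hpagesize - 1);
}
#endif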
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
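#if 0
/* Usage sketch (editor's addition, not part of the original file): device
 * models normally reach these allocators through the memory API, but the
 * underlying flow is: qemu_ram_alloc() picks a free ram_addr_t offset with
 * find_ram_offset(), links a RAMBlock, and the returned offset can later be
 * turned into a host pointer with qemu_get_ram_ptr().  "example_mr" is a
 * hypothetical MemoryRegion owned by the caller.
 */
static void example_alloc_and_touch(MemoryRegion *example_mr)
{
    ram_addr_t offset = qemu_ram_alloc(TARGET_PAGE_SIZE, example_mr);
    uint8_t *host = qemu_get_ram_ptr(offset);

    host[0] = 0x42;            /* plain host-side access to guest RAM */
    qemu_put_ram_ptr(host);
    qemu_ram_free(offset);
}
#endif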
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
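#if 0
/* Usage sketch (editor's addition, not part of the original file): the
 * comment above means a device may use the returned pointer only for memory
 * it owns (e.g. its own video RAM block) and stays inside.  For guest
 * visible DMA, cpu_physical_memory_rw() or the map/unmap API further below
 * must be used instead.  "vram_offset" and "dma_addr" are hypothetical.
 */
static void example_ram_ptr_use(ram_addr_t vram_offset,
                                target_phys_addr_t dma_addr)
{
    uint8_t buf[16];

    /* fine: the device owns this block and stays inside it */
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);
    memset(vram, 0, sizeof(buf));
    qemu_put_ram_ptr(vram);

    /* general-purpose DMA goes through the slow path instead */
    cpu_physical_memory_rw(dma_addr, buf, sizeof(buf), 0);
}
#endif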
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}
static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;

    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}
static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}
static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}
MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}
static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}
*listener
)
3266 static void io_commit(MemoryListener
*listener
)
3270 static void io_region_add(MemoryListener
*listener
,
3271 MemoryRegionSection
*section
)
3273 MemoryRegionIORange
*mrio
= g_new(MemoryRegionIORange
, 1);
3275 mrio
->mr
= section
->mr
;
3276 mrio
->offset
= section
->offset_within_region
;
3277 iorange_init(&mrio
->iorange
, &memory_region_iorange_ops
,
3278 section
->offset_within_address_space
, section
->size
);
3279 ioport_register(&mrio
->iorange
);
3282 static void io_region_del(MemoryListener
*listener
,
3283 MemoryRegionSection
*section
)
3285 isa_unassign_ioport(section
->offset_within_address_space
, section
->size
);
3288 static void io_region_nop(MemoryListener
*listener
,
3289 MemoryRegionSection
*section
)
3293 static void io_log_start(MemoryListener
*listener
,
3294 MemoryRegionSection
*section
)
3298 static void io_log_stop(MemoryListener
*listener
,
3299 MemoryRegionSection
*section
)
3303 static void io_log_sync(MemoryListener
*listener
,
3304 MemoryRegionSection
*section
)
3308 static void io_log_global_start(MemoryListener
*listener
)
3312 static void io_log_global_stop(MemoryListener
*listener
)
3316 static void io_eventfd_add(MemoryListener
*listener
,
3317 MemoryRegionSection
*section
,
3318 bool match_data
, uint64_t data
, EventNotifier
*e
)
3322 static void io_eventfd_del(MemoryListener
*listener
,
3323 MemoryRegionSection
*section
,
3324 bool match_data
, uint64_t data
, EventNotifier
*e
)
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
};
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(target_phys_addr_t addr,
                                     target_phys_addr_t length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
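#if 0
/* Usage sketch (editor's addition, not part of the original file): the
 * intended map/write/unmap pattern.  When the target region is not plain
 * writable RAM the call may hand back the single bounce buffer, or NULL if
 * it is already in use; callers that get NULL can register a callback with
 * cpu_register_map_client() and retry later.
 */
static void example_dma_write(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        return; /* resources exhausted; retry via cpu_register_map_client() */
    }
    memcpy(host, data, plen);                 /* plen may be less than size */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif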
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
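#if 0
/* Usage sketch (editor's addition, not part of the original file): the
 * _le/_be variants read guest-physical memory with a fixed byte order,
 * independent of TARGET_WORDS_BIGENDIAN, which is what device models with
 * an architected endianness want; the plain versions follow the target.
 * "desc_addr" is a hypothetical little-endian descriptor address.
 */
static uint32_t example_read_le_descriptor(target_phys_addr_t desc_addr)
{
    uint32_t flags = ldl_le_phys(desc_addr);       /* always little endian */
    uint32_t count = lduw_le_phys(desc_addr + 4);
    return flags | count;
}
#endif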
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif