accel/tcg/translate-all.c
1 /*
2 * Host code generation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #ifdef _WIN32
20 #include <windows.h>
21 #endif
22 #include "qemu/osdep.h"
25 #include "qemu-common.h"
26 #define NO_CPU_IO_DEFS
27 #include "cpu.h"
28 #include "trace.h"
29 #include "disas/disas.h"
30 #include "exec/exec-all.h"
31 #include "tcg.h"
32 #if defined(CONFIG_USER_ONLY)
33 #include "qemu.h"
34 #include "exec/exec-all.h"
35 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36 #include <sys/param.h>
37 #if __FreeBSD_version >= 700104
38 #define HAVE_KINFO_GETVMMAP
39 #define sigqueue sigqueue_freebsd /* avoid redefinition */
40 #include <sys/proc.h>
41 #include <machine/profile.h>
42 #define _KERNEL
43 #include <sys/user.h>
44 #undef _KERNEL
45 #undef sigqueue
46 #include <libutil.h>
47 #endif
48 #endif
49 #else
50 #include "exec/address-spaces.h"
51 #endif
53 #include "exec/cputlb.h"
54 #include "exec/tb-hash.h"
55 #include "translate-all.h"
56 #include "qemu/bitmap.h"
57 #include "qemu/error-report.h"
58 #include "qemu/timer.h"
59 #include "qemu/main-loop.h"
60 #include "exec/log.h"
61 #include "sysemu/cpus.h"
63 /* #define DEBUG_TB_INVALIDATE */
64 /* #define DEBUG_TB_FLUSH */
65 /* make various TB consistency checks */
66 /* #define DEBUG_TB_CHECK */
68 #if !defined(CONFIG_USER_ONLY)
69 /* TB consistency checks only implemented for usermode emulation. */
70 #undef DEBUG_TB_CHECK
71 #endif
73 /* Access to the various translation structures needs to be serialised via locks
74 * for consistency. This is automatic for SoftMMU-based system
75 * emulation due to its single-threaded nature. In user-mode emulation
76 * access to the memory-related structures is protected with the
77 * mmap_lock.
79 #ifdef CONFIG_SOFTMMU
80 #define assert_memory_lock() tcg_debug_assert(have_tb_lock)
81 #else
82 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
83 #endif
85 #define SMC_BITMAP_USE_THRESHOLD 10
87 typedef struct PageDesc {
88 /* list of TBs intersecting this ram page */
89 TranslationBlock *first_tb;
90 #ifdef CONFIG_SOFTMMU
91 /* in order to optimize self modifying code, we count the number
92 of lookups we do to a given page to use a bitmap */
93 unsigned int code_write_count;
94 unsigned long *code_bitmap;
95 #else
96 unsigned long flags;
97 #endif
98 } PageDesc;
100 /* In system mode we want L1_MAP to be based on ram offsets,
101 while in user mode we want it to be based on virtual addresses. */
102 #if !defined(CONFIG_USER_ONLY)
103 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
104 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
105 #else
106 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
107 #endif
108 #else
109 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
110 #endif
112 /* Size of the L2 (and L3, etc) page tables. */
113 #define V_L2_BITS 10
114 #define V_L2_SIZE (1 << V_L2_BITS)
116 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
117 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
118 sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
119 * BITS_PER_BYTE);
122 * L1 Mapping properties
124 static int v_l1_size;
125 static int v_l1_shift;
126 static int v_l2_levels;
128 /* The bottom level has pointers to PageDesc, and is indexed by
129 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
131 #define V_L1_MIN_BITS 4
132 #define V_L1_MAX_BITS (V_L2_BITS + 3)
133 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
135 static void *l1_map[V_L1_MAX_SIZE];
137 /* code generation context */
138 TCGContext tcg_ctx;
139 bool parallel_cpus;
141 /* translation block context */
142 __thread int have_tb_lock;
144 static void page_table_config_init(void)
146 uint32_t v_l1_bits;
148 assert(TARGET_PAGE_BITS);
149 /* The bits remaining after N lower levels of page tables. */
150 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
151 if (v_l1_bits < V_L1_MIN_BITS) {
152 v_l1_bits += V_L2_BITS;
155 v_l1_size = 1 << v_l1_bits;
156 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
157 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
159 assert(v_l1_bits <= V_L1_MAX_BITS);
160 assert(v_l1_shift % V_L2_BITS == 0);
161 assert(v_l2_levels >= 0);
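/* Illustrative example (not part of the original code, numbers assumed):
 * with a hypothetical 48-bit L1_MAP_ADDR_SPACE_BITS and 12-bit
 * TARGET_PAGE_BITS, (48 - 12) % 10 == 6, so v_l1_bits = 6, v_l1_size = 64,
 * v_l1_shift = 36 - 6 = 30 and v_l2_levels = 30 / 10 - 1 = 2: a 64-entry
 * L1 table, two intermediate 1024-entry levels, and a final 1024-entry
 * PageDesc level (6 + 10 + 10 + 10 = 36 bits).
 */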
164 #define assert_tb_locked() tcg_debug_assert(have_tb_lock)
165 #define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
167 void tb_lock(void)
169 assert_tb_unlocked();
170 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
171 have_tb_lock++;
174 void tb_unlock(void)
176 assert_tb_locked();
177 have_tb_lock--;
178 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
181 void tb_lock_reset(void)
183 if (have_tb_lock) {
184 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
185 have_tb_lock = 0;
189 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
191 void cpu_gen_init(void)
193 tcg_context_init(&tcg_ctx);
196 /* Encode VAL as a signed leb128 sequence at P.
197 Return P incremented past the encoded value. */
198 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
200 int more, byte;
202 do {
203 byte = val & 0x7f;
204 val >>= 7;
205 more = !((val == 0 && (byte & 0x40) == 0)
206 || (val == -1 && (byte & 0x40) != 0));
207 if (more) {
208 byte |= 0x80;
210 *p++ = byte;
211 } while (more);
213 return p;
216 /* Decode a signed leb128 sequence at *PP; increment *PP past the
217 decoded value. Return the decoded value. */
218 static target_long decode_sleb128(uint8_t **pp)
220 uint8_t *p = *pp;
221 target_long val = 0;
222 int byte, shift = 0;
224 do {
225 byte = *p++;
226 val |= (target_ulong)(byte & 0x7f) << shift;
227 shift += 7;
228 } while (byte & 0x80);
229 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
230 val |= -(target_ulong)1 << shift;
233 *pp = p;
234 return val;
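/* Worked example (illustrative only): a delta of 300 encodes as the two
 * bytes { 0xac, 0x02 } (0x2c with the 0x80 continuation bit, then 0x02),
 * while a delta of -3 encodes as the single byte { 0x7d }, since bit 0x40
 * being set marks the value as negative and terminates the sequence.
 */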
237 /* Encode the data collected about the instructions while compiling TB.
238 Place the data at BLOCK, and return the number of bytes consumed.
240 The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
241 which come from the target's insn_start data, followed by a uintptr_t
242 which comes from the host pc of the end of the code implementing the insn.
244 Each line of the table is encoded as sleb128 deltas from the previous
245 line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
246 That is, the first column is seeded with the guest pc, the last column
247 with the host pc, and the middle columns with zeros. */
249 static int encode_search(TranslationBlock *tb, uint8_t *block)
251 uint8_t *highwater = tcg_ctx.code_gen_highwater;
252 uint8_t *p = block;
253 int i, j, n;
255 tb->tc_search = block;
257 for (i = 0, n = tb->icount; i < n; ++i) {
258 target_ulong prev;
260 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
261 if (i == 0) {
262 prev = (j == 0 ? tb->pc : 0);
263 } else {
264 prev = tcg_ctx.gen_insn_data[i - 1][j];
266 p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
268 prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
269 p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
271 /* Test for (pending) buffer overflow. The assumption is that any
272 one row beginning below the high water mark cannot overrun
273 the buffer completely. Thus we can test for overflow after
274 encoding a row without having to check during encoding. */
275 if (unlikely(p > highwater)) {
276 return -1;
280 return p - block;
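/* Illustrative layout (assumed, not taken from the source): for a TB with
 * two guest insns and TARGET_INSN_START_WORDS == 1, the search data is the
 * sleb128 stream
 *   { pc1 - tb->pc, end1 - 0, pc2 - pc1, end2 - end1 }
 * where pcN is the guest pc of insn N and endN the offset into the generated
 * host code of its end; cpu_restore_state_from_tb() below re-accumulates
 * these deltas.
 */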
283 /* The cpu state corresponding to 'searched_pc' is restored.
284 * Called with tb_lock held.
286 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
287 uintptr_t searched_pc)
289 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
290 uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
291 CPUArchState *env = cpu->env_ptr;
292 uint8_t *p = tb->tc_search;
293 int i, j, num_insns = tb->icount;
294 #ifdef CONFIG_PROFILER
295 int64_t ti = profile_getclock();
296 #endif
298 searched_pc -= GETPC_ADJ;
300 if (searched_pc < host_pc) {
301 return -1;
304 /* Reconstruct the stored insn data while looking for the point at
305 which the end of the insn exceeds the searched_pc. */
306 for (i = 0; i < num_insns; ++i) {
307 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
308 data[j] += decode_sleb128(&p);
310 host_pc += decode_sleb128(&p);
311 if (host_pc > searched_pc) {
312 goto found;
315 return -1;
317 found:
318 if (tb->cflags & CF_USE_ICOUNT) {
319 assert(use_icount);
320 /* Reset the cycle counter to the start of the block. */
321 cpu->icount_decr.u16.low += num_insns;
322 /* Clear the IO flag. */
323 cpu->can_do_io = 0;
325 cpu->icount_decr.u16.low -= i;
326 restore_state_to_opc(env, tb, data);
328 #ifdef CONFIG_PROFILER
329 tcg_ctx.restore_time += profile_getclock() - ti;
330 tcg_ctx.restore_count++;
331 #endif
332 return 0;
335 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
337 TranslationBlock *tb;
338 bool r = false;
340 /* A retaddr of zero is invalid so we really shouldn't have ended
341 * up here. The target code has likely forgotten to check retaddr
342 * != 0 before attempting to restore state. We return early to
343 * avoid blowing up on a recursive tb_lock(). The target must have
344 * previously survived a failed cpu_restore_state because
345 * tb_find_pc(0) would have failed anyway. It still should be
346 * fixed though.
349 if (!retaddr) {
350 return r;
353 tb_lock();
354 tb = tb_find_pc(retaddr);
355 if (tb) {
356 cpu_restore_state_from_tb(cpu, tb, retaddr);
357 if (tb->cflags & CF_NOCACHE) {
358 /* one-shot translation, invalidate it immediately */
359 tb_phys_invalidate(tb, -1);
360 tb_free(tb);
362 r = true;
364 tb_unlock();
366 return r;
369 static void page_init(void)
371 page_size_init();
372 page_table_config_init();
374 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
376 #ifdef HAVE_KINFO_GETVMMAP
377 struct kinfo_vmentry *freep;
378 int i, cnt;
380 freep = kinfo_getvmmap(getpid(), &cnt);
381 if (freep) {
382 mmap_lock();
383 for (i = 0; i < cnt; i++) {
384 unsigned long startaddr, endaddr;
386 startaddr = freep[i].kve_start;
387 endaddr = freep[i].kve_end;
388 if (h2g_valid(startaddr)) {
389 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
391 if (h2g_valid(endaddr)) {
392 endaddr = h2g(endaddr);
393 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
394 } else {
395 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
396 endaddr = ~0ul;
397 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
398 #endif
402 free(freep);
403 mmap_unlock();
405 #else
406 FILE *f;
408 last_brk = (unsigned long)sbrk(0);
410 f = fopen("/compat/linux/proc/self/maps", "r");
411 if (f) {
412 mmap_lock();
414 do {
415 unsigned long startaddr, endaddr;
416 int n;
418 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
420 if (n == 2 && h2g_valid(startaddr)) {
421 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
423 if (h2g_valid(endaddr)) {
424 endaddr = h2g(endaddr);
425 } else {
426 endaddr = ~0ul;
428 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
430 } while (!feof(f));
432 fclose(f);
433 mmap_unlock();
435 #endif
437 #endif
440 /* If alloc=1:
441 * Called with tb_lock held for system emulation.
442 * Called with mmap_lock held for user-mode emulation.
444 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
446 PageDesc *pd;
447 void **lp;
448 int i;
450 if (alloc) {
451 assert_memory_lock();
454 /* Level 1. Always allocated. */
455 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
457 /* Level 2..N-1. */
458 for (i = v_l2_levels; i > 0; i--) {
459 void **p = atomic_rcu_read(lp);
461 if (p == NULL) {
462 if (!alloc) {
463 return NULL;
465 p = g_new0(void *, V_L2_SIZE);
466 atomic_rcu_set(lp, p);
469 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
472 pd = atomic_rcu_read(lp);
473 if (pd == NULL) {
474 if (!alloc) {
475 return NULL;
477 pd = g_new0(PageDesc, V_L2_SIZE);
478 atomic_rcu_set(lp, pd);
481 return pd + (index & (V_L2_SIZE - 1));
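/* Example walk (illustrative, using the hypothetical 6/10/10/10 split shown
 * earlier): a page index is consumed top-down as
 *   l1_map[(index >> 30) & 63] -> level2[(index >> 20) & 1023]
 *     -> level1[(index >> 10) & 1023] -> PageDesc[index & 1023]
 * with intermediate tables allocated lazily only when alloc != 0.
 */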
484 static inline PageDesc *page_find(tb_page_addr_t index)
486 return page_find_alloc(index, 0);
489 #if defined(CONFIG_USER_ONLY)
490 /* Currently it is not recommended to allocate big chunks of data in
491 user mode. This will change when a dedicated libc is used. */
492 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
493 region in which the guest needs to run. Revisit this. */
494 #define USE_STATIC_CODE_GEN_BUFFER
495 #endif
497 /* Minimum size of the code gen buffer. This number is randomly chosen,
498 but not so small that we can't have a fair number of TBs live. */
499 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
501 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
502 indicated, this is constrained by the range of direct branches on the
503 host cpu, as used by the TCG implementation of goto_tb. */
504 #if defined(__x86_64__)
505 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
506 #elif defined(__sparc__)
507 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
508 #elif defined(__powerpc64__)
509 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
510 #elif defined(__powerpc__)
511 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
512 #elif defined(__aarch64__)
513 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
514 #elif defined(__s390x__)
515 /* We have a +- 4GB range on the branches; leave some slop. */
516 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
517 #elif defined(__mips__)
518 /* We have a 256MB branch region, but leave room to make sure the
519 main executable is also within that region. */
520 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
521 #else
522 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
523 #endif
525 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
527 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
528 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
529 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
531 static inline size_t size_code_gen_buffer(size_t tb_size)
533 /* Size the buffer. */
534 if (tb_size == 0) {
535 #ifdef USE_STATIC_CODE_GEN_BUFFER
536 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
537 #else
538 /* ??? Needs adjustments. */
539 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
540 static buffer, we could size this on RESERVED_VA, on the text
541 segment size of the executable, or continue to use the default. */
542 tb_size = (unsigned long)(ram_size / 4);
543 #endif
545 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
546 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
548 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
549 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
551 return tb_size;
554 #ifdef __mips__
555 /* In order to use J and JAL within the code_gen_buffer, we require
556 that the buffer not cross a 256MB boundary. */
557 static inline bool cross_256mb(void *addr, size_t size)
559 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
562 /* We weren't able to allocate a buffer without crossing that boundary,
563 so make do with the larger portion of the buffer that doesn't cross.
564 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
565 static inline void *split_cross_256mb(void *buf1, size_t size1)
567 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
568 size_t size2 = buf1 + size1 - buf2;
570 size1 = buf2 - buf1;
571 if (size1 < size2) {
572 size1 = size2;
573 buf1 = buf2;
576 tcg_ctx.code_gen_buffer_size = size1;
577 return buf1;
579 #endif
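/* Illustrative example (hypothetical addresses): a buffer at 0x0ff00000 of
 * size 0x400000 crosses the 0x10000000 boundary; split_cross_256mb() keeps
 * the larger piece, here the 0x300000 bytes starting at 0x10000000, and
 * shrinks code_gen_buffer_size accordingly.
 */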
581 #ifdef USE_STATIC_CODE_GEN_BUFFER
582 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
583 __attribute__((aligned(CODE_GEN_ALIGN)));
585 # ifdef _WIN32
586 static inline void do_protect(void *addr, long size, int prot)
588 DWORD old_protect;
589 VirtualProtect(addr, size, prot, &old_protect);
592 static inline void map_exec(void *addr, long size)
594 do_protect(addr, size, PAGE_EXECUTE_READWRITE);
597 static inline void map_none(void *addr, long size)
599 do_protect(addr, size, PAGE_NOACCESS);
601 # else
602 static inline void do_protect(void *addr, long size, int prot)
604 uintptr_t start, end;
606 start = (uintptr_t)addr;
607 start &= qemu_real_host_page_mask;
609 end = (uintptr_t)addr + size;
610 end = ROUND_UP(end, qemu_real_host_page_size);
612 mprotect((void *)start, end - start, prot);
615 static inline void map_exec(void *addr, long size)
617 do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
620 static inline void map_none(void *addr, long size)
622 do_protect(addr, size, PROT_NONE);
624 # endif /* WIN32 */
626 static inline void *alloc_code_gen_buffer(void)
628 void *buf = static_code_gen_buffer;
629 size_t full_size, size;
631 /* The size of the buffer, rounded down to end on a page boundary. */
632 full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
633 & qemu_real_host_page_mask) - (uintptr_t)buf;
635 /* Reserve a guard page. */
636 size = full_size - qemu_real_host_page_size;
638 /* Honor a command-line option limiting the size of the buffer. */
639 if (size > tcg_ctx.code_gen_buffer_size) {
640 size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
641 & qemu_real_host_page_mask) - (uintptr_t)buf;
643 tcg_ctx.code_gen_buffer_size = size;
645 #ifdef __mips__
646 if (cross_256mb(buf, size)) {
647 buf = split_cross_256mb(buf, size);
648 size = tcg_ctx.code_gen_buffer_size;
650 #endif
652 map_exec(buf, size);
653 map_none(buf + size, qemu_real_host_page_size);
654 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
656 return buf;
658 #elif defined(_WIN32)
659 static inline void *alloc_code_gen_buffer(void)
661 size_t size = tcg_ctx.code_gen_buffer_size;
662 void *buf1, *buf2;
664 /* Perform the allocation in two steps, so that the guard page
665 is reserved but uncommitted. */
666 buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
667 MEM_RESERVE, PAGE_NOACCESS);
668 if (buf1 != NULL) {
669 buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
670 assert(buf1 == buf2);
673 return buf1;
675 #else
676 static inline void *alloc_code_gen_buffer(void)
678 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
679 uintptr_t start = 0;
680 size_t size = tcg_ctx.code_gen_buffer_size;
681 void *buf;
683 /* Constrain the position of the buffer based on the host cpu.
684 Note that these addresses are chosen in concert with the
685 addresses assigned in the relevant linker script file. */
686 # if defined(__PIE__) || defined(__PIC__)
687 /* Don't bother setting a preferred location if we're building
688 a position-independent executable. We're more likely to get
689 an address near the main executable if we let the kernel
690 choose the address. */
691 # elif defined(__x86_64__) && defined(MAP_32BIT)
692 /* Force the memory down into low memory with the executable.
693 Leave the choice of exact location with the kernel. */
694 flags |= MAP_32BIT;
695 /* Cannot expect to map more than 800MB in low memory. */
696 if (size > 800u * 1024 * 1024) {
697 tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
699 # elif defined(__sparc__)
700 start = 0x40000000ul;
701 # elif defined(__s390x__)
702 start = 0x90000000ul;
703 # elif defined(__mips__)
704 # if _MIPS_SIM == _ABI64
705 start = 0x128000000ul;
706 # else
707 start = 0x08000000ul;
708 # endif
709 # endif
711 buf = mmap((void *)start, size + qemu_real_host_page_size,
712 PROT_NONE, flags, -1, 0);
713 if (buf == MAP_FAILED) {
714 return NULL;
717 #ifdef __mips__
718 if (cross_256mb(buf, size)) {
719 /* Try again, with the original still mapped, to avoid re-acquiring
720 that 256mb crossing. This time don't specify an address. */
721 size_t size2;
722 void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
723 PROT_NONE, flags, -1, 0);
724 switch ((int)(buf2 != MAP_FAILED)) {
725 case 1:
726 if (!cross_256mb(buf2, size)) {
727 /* Success! Use the new buffer. */
728 munmap(buf, size + qemu_real_host_page_size);
729 break;
731 /* Failure. Work with what we had. */
732 munmap(buf2, size + qemu_real_host_page_size);
733 /* fallthru */
734 default:
735 /* Split the original buffer. Free the smaller half. */
736 buf2 = split_cross_256mb(buf, size);
737 size2 = tcg_ctx.code_gen_buffer_size;
738 if (buf == buf2) {
739 munmap(buf + size2 + qemu_real_host_page_size, size - size2);
740 } else {
741 munmap(buf, size - size2);
743 size = size2;
744 break;
746 buf = buf2;
748 #endif
750 /* Make the final buffer accessible. The guard page at the end
751 will remain inaccessible with PROT_NONE. */
752 mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
754 /* Request large pages for the buffer. */
755 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
757 return buf;
759 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
761 static inline void code_gen_alloc(size_t tb_size)
763 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
764 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
765 if (tcg_ctx.code_gen_buffer == NULL) {
766 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
767 exit(1);
770 /* size this conservatively -- realloc later if needed */
771 tcg_ctx.tb_ctx.tbs_size =
772 tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8;
773 if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) {
774 tcg_ctx.tb_ctx.tbs_size = 64 * 1024;
776 tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size);
778 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
781 static void tb_htable_init(void)
783 unsigned int mode = QHT_MODE_AUTO_RESIZE;
785 qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
788 /* Must be called before using the QEMU cpus. 'tb_size' is the size
789 (in bytes) allocated to the translation buffer. Zero means default
790 size. */
791 void tcg_exec_init(unsigned long tb_size)
793 tcg_allowed = true;
794 cpu_gen_init();
795 page_init();
796 tb_htable_init();
797 code_gen_alloc(tb_size);
798 #if defined(CONFIG_SOFTMMU)
799 /* There's no guest base to take into account, so go ahead and
800 initialize the prologue now. */
801 tcg_prologue_init(&tcg_ctx);
802 #endif
806 * Allocate a new translation block. Flush the translation buffer if
807 * too many translation blocks or too much generated code.
809 * Called with tb_lock held.
811 static TranslationBlock *tb_alloc(target_ulong pc)
813 TranslationBlock *tb;
814 TBContext *ctx;
816 assert_tb_locked();
818 tb = tcg_tb_alloc(&tcg_ctx);
819 if (unlikely(tb == NULL)) {
820 return NULL;
822 ctx = &tcg_ctx.tb_ctx;
823 if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
824 ctx->tbs_size *= 2;
825 ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
827 ctx->tbs[ctx->nb_tbs++] = tb;
828 return tb;
831 /* Called with tb_lock held. */
832 void tb_free(TranslationBlock *tb)
834 assert_tb_locked();
836 /* In practice this is mostly used for single-use temporary TBs.
837 Ignore the hard cases and just back up if this TB happens to
838 be the last one generated. */
839 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
840 tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
841 size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);
843 tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size;
844 tcg_ctx.tb_ctx.nb_tbs--;
848 static inline void invalidate_page_bitmap(PageDesc *p)
850 #ifdef CONFIG_SOFTMMU
851 g_free(p->code_bitmap);
852 p->code_bitmap = NULL;
853 p->code_write_count = 0;
854 #endif
857 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
858 static void page_flush_tb_1(int level, void **lp)
860 int i;
862 if (*lp == NULL) {
863 return;
865 if (level == 0) {
866 PageDesc *pd = *lp;
868 for (i = 0; i < V_L2_SIZE; ++i) {
869 pd[i].first_tb = NULL;
870 invalidate_page_bitmap(pd + i);
872 } else {
873 void **pp = *lp;
875 for (i = 0; i < V_L2_SIZE; ++i) {
876 page_flush_tb_1(level - 1, pp + i);
881 static void page_flush_tb(void)
883 int i, l1_sz = v_l1_size;
885 for (i = 0; i < l1_sz; i++) {
886 page_flush_tb_1(v_l2_levels, l1_map + i);
890 /* flush all the translation blocks */
891 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
893 tb_lock();
895 /* If it has already been done at the request of another CPU,
896 * just retry.
898 if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
899 goto done;
902 #if defined(DEBUG_TB_FLUSH)
903 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
904 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
905 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
906 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
907 tcg_ctx.tb_ctx.nb_tbs : 0);
908 #endif
909 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
910 > tcg_ctx.code_gen_buffer_size) {
911 cpu_abort(cpu, "Internal error: code buffer overflow\n");
914 CPU_FOREACH(cpu) {
915 cpu_tb_jmp_cache_clear(cpu);
918 tcg_ctx.tb_ctx.nb_tbs = 0;
919 qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
920 page_flush_tb();
922 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
923 /* XXX: flush processor icache at this point if cache flush is
924 expensive */
925 atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
926 tcg_ctx.tb_ctx.tb_flush_count + 1);
928 done:
929 tb_unlock();
932 void tb_flush(CPUState *cpu)
934 if (tcg_enabled()) {
935 unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
936 async_safe_run_on_cpu(cpu, do_tb_flush,
937 RUN_ON_CPU_HOST_INT(tb_flush_count));
941 #ifdef DEBUG_TB_CHECK
943 static void
944 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
946 TranslationBlock *tb = p;
947 target_ulong addr = *(target_ulong *)userp;
949 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
950 printf("ERROR invalidate: address=" TARGET_FMT_lx
951 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
955 /* verify that all the pages have correct rights for code
957 * Called with tb_lock held.
959 static void tb_invalidate_check(target_ulong address)
961 address &= TARGET_PAGE_MASK;
962 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
965 static void
966 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
968 TranslationBlock *tb = p;
969 int flags1, flags2;
971 flags1 = page_get_flags(tb->pc);
972 flags2 = page_get_flags(tb->pc + tb->size - 1);
973 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
974 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
975 (long)tb->pc, tb->size, flags1, flags2);
979 /* verify that all the pages have correct rights for code */
980 static void tb_page_check(void)
982 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
985 #endif
987 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
989 TranslationBlock *tb1;
990 unsigned int n1;
992 for (;;) {
993 tb1 = *ptb;
994 n1 = (uintptr_t)tb1 & 3;
995 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
996 if (tb1 == tb) {
997 *ptb = tb1->page_next[n1];
998 break;
1000 ptb = &tb1->page_next[n1];
1004 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1005 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1007 TranslationBlock *tb1;
1008 uintptr_t *ptb, ntb;
1009 unsigned int n1;
1011 ptb = &tb->jmp_list_next[n];
1012 if (*ptb) {
1013 /* find tb(n) in circular list */
1014 for (;;) {
1015 ntb = *ptb;
1016 n1 = ntb & 3;
1017 tb1 = (TranslationBlock *)(ntb & ~3);
1018 if (n1 == n && tb1 == tb) {
1019 break;
1021 if (n1 == 2) {
1022 ptb = &tb1->jmp_list_first;
1023 } else {
1024 ptb = &tb1->jmp_list_next[n1];
1027 /* now we can suppress tb(n) from the list */
1028 *ptb = tb->jmp_list_next[n];
1030 tb->jmp_list_next[n] = (uintptr_t)NULL;
1034 /* reset the jump entry 'n' of a TB so that it is not chained to
1035 another TB */
1036 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1038 uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
1039 tb_set_jmp_target(tb, n, addr);
1042 /* remove any jumps to the TB */
1043 static inline void tb_jmp_unlink(TranslationBlock *tb)
1045 TranslationBlock *tb1;
1046 uintptr_t *ptb, ntb;
1047 unsigned int n1;
1049 ptb = &tb->jmp_list_first;
1050 for (;;) {
1051 ntb = *ptb;
1052 n1 = ntb & 3;
1053 tb1 = (TranslationBlock *)(ntb & ~3);
1054 if (n1 == 2) {
1055 break;
1057 tb_reset_jump(tb1, n1);
1058 *ptb = tb1->jmp_list_next[n1];
1059 tb1->jmp_list_next[n1] = (uintptr_t)NULL;
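/* Note on the encoding used above (derived from the code, stated here for
 * clarity): jump-list pointers carry a tag in their two low bits: 0 or 1
 * names which of the two outgoing jumps of the pointed-to TB the link
 * belongs to, while the value 2 marks tb->jmp_list_first, i.e. the end of
 * the circular list.
 */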
1063 /* invalidate one TB
1065 * Called with tb_lock held.
1067 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1069 CPUState *cpu;
1070 PageDesc *p;
1071 uint32_t h;
1072 tb_page_addr_t phys_pc;
1074 assert_tb_locked();
1076 atomic_set(&tb->invalid, true);
1078 /* remove the TB from the hash list */
1079 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1080 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
1081 qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
1083 /* remove the TB from the page list */
1084 if (tb->page_addr[0] != page_addr) {
1085 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1086 tb_page_remove(&p->first_tb, tb);
1087 invalidate_page_bitmap(p);
1089 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
1090 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1091 tb_page_remove(&p->first_tb, tb);
1092 invalidate_page_bitmap(p);
1095 /* remove the TB from the hash list */
1096 h = tb_jmp_cache_hash_func(tb->pc);
1097 CPU_FOREACH(cpu) {
1098 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1099 atomic_set(&cpu->tb_jmp_cache[h], NULL);
1103 /* suppress this TB from the two jump lists */
1104 tb_remove_from_jmp_list(tb, 0);
1105 tb_remove_from_jmp_list(tb, 1);
1107 /* suppress any remaining jumps to this TB */
1108 tb_jmp_unlink(tb);
1110 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
1113 #ifdef CONFIG_SOFTMMU
1114 static void build_page_bitmap(PageDesc *p)
1116 int n, tb_start, tb_end;
1117 TranslationBlock *tb;
1119 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1121 tb = p->first_tb;
1122 while (tb != NULL) {
1123 n = (uintptr_t)tb & 3;
1124 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1125 /* NOTE: this is subtle as a TB may span two physical pages */
1126 if (n == 0) {
1127 /* NOTE: tb_end may be after the end of the page, but
1128 it is not a problem */
1129 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1130 tb_end = tb_start + tb->size;
1131 if (tb_end > TARGET_PAGE_SIZE) {
1132 tb_end = TARGET_PAGE_SIZE;
1134 } else {
1135 tb_start = 0;
1136 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1138 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1139 tb = tb->page_next[n];
1142 #endif
1144 /* add the TB to the target page and protect it if necessary
1146 * Called with mmap_lock held for user-mode emulation.
1148 static inline void tb_alloc_page(TranslationBlock *tb,
1149 unsigned int n, tb_page_addr_t page_addr)
1151 PageDesc *p;
1152 #ifndef CONFIG_USER_ONLY
1153 bool page_already_protected;
1154 #endif
1156 assert_memory_lock();
1158 tb->page_addr[n] = page_addr;
1159 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1160 tb->page_next[n] = p->first_tb;
1161 #ifndef CONFIG_USER_ONLY
1162 page_already_protected = p->first_tb != NULL;
1163 #endif
1164 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1165 invalidate_page_bitmap(p);
1167 #if defined(CONFIG_USER_ONLY)
1168 if (p->flags & PAGE_WRITE) {
1169 target_ulong addr;
1170 PageDesc *p2;
1171 int prot;
1173 /* force the host page to be non-writable (writes will have a
1174 page fault + mprotect overhead) */
1175 page_addr &= qemu_host_page_mask;
1176 prot = 0;
1177 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1178 addr += TARGET_PAGE_SIZE) {
1180 p2 = page_find(addr >> TARGET_PAGE_BITS);
1181 if (!p2) {
1182 continue;
1184 prot |= p2->flags;
1185 p2->flags &= ~PAGE_WRITE;
1187 mprotect(g2h(page_addr), qemu_host_page_size,
1188 (prot & PAGE_BITS) & ~PAGE_WRITE);
1189 #ifdef DEBUG_TB_INVALIDATE
1190 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1191 page_addr);
1192 #endif
1194 #else
1195 /* if some code is already present, then the pages are already
1196 protected. So we handle the case where only the first TB is
1197 allocated in a physical page */
1198 if (!page_already_protected) {
1199 tlb_protect_code(page_addr);
1201 #endif
1204 /* add a new TB and link it to the physical page tables. phys_page2 is
1205 * (-1) to indicate that only one page contains the TB.
1207 * Called with mmap_lock held for user-mode emulation.
1209 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1210 tb_page_addr_t phys_page2)
1212 uint32_t h;
1214 assert_memory_lock();
1216 /* add in the page list */
1217 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1218 if (phys_page2 != -1) {
1219 tb_alloc_page(tb, 1, phys_page2);
1220 } else {
1221 tb->page_addr[1] = -1;
1224 /* add in the hash table */
1225 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
1226 qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
1228 #ifdef DEBUG_TB_CHECK
1229 tb_page_check();
1230 #endif
1233 /* Called with mmap_lock held for user mode emulation. */
1234 TranslationBlock *tb_gen_code(CPUState *cpu,
1235 target_ulong pc, target_ulong cs_base,
1236 uint32_t flags, int cflags)
1238 CPUArchState *env = cpu->env_ptr;
1239 TranslationBlock *tb;
1240 tb_page_addr_t phys_pc, phys_page2;
1241 target_ulong virt_page2;
1242 tcg_insn_unit *gen_code_buf;
1243 int gen_code_size, search_size;
1244 #ifdef CONFIG_PROFILER
1245 int64_t ti;
1246 #endif
1247 assert_memory_lock();
1249 phys_pc = get_page_addr_code(env, pc);
1250 if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
1251 cflags |= CF_USE_ICOUNT;
1254 tb = tb_alloc(pc);
1255 if (unlikely(!tb)) {
1256 buffer_overflow:
1257 /* flush must be done */
1258 tb_flush(cpu);
1259 mmap_unlock();
1260 /* Make the execution loop process the flush as soon as possible. */
1261 cpu->exception_index = EXCP_INTERRUPT;
1262 cpu_loop_exit(cpu);
1265 gen_code_buf = tcg_ctx.code_gen_ptr;
1266 tb->tc_ptr = gen_code_buf;
1267 tb->pc = pc;
1268 tb->cs_base = cs_base;
1269 tb->flags = flags;
1270 tb->cflags = cflags;
1271 tb->trace_vcpu_dstate = *cpu->trace_dstate;
1272 tb->invalid = false;
1274 #ifdef CONFIG_PROFILER
1275 tcg_ctx.tb_count1++; /* includes aborted translations because of
1276 exceptions */
1277 ti = profile_getclock();
1278 #endif
1280 tcg_func_start(&tcg_ctx);
1282 tcg_ctx.cpu = ENV_GET_CPU(env);
1283 gen_intermediate_code(cpu, tb);
1284 tcg_ctx.cpu = NULL;
1286 trace_translate_block(tb, tb->pc, tb->tc_ptr);
1288 /* generate machine code */
1289 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1290 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1291 tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
1292 #ifdef USE_DIRECT_JUMP
1293 tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
1294 tcg_ctx.tb_jmp_target_addr = NULL;
1295 #else
1296 tcg_ctx.tb_jmp_insn_offset = NULL;
1297 tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
1298 #endif
1300 #ifdef CONFIG_PROFILER
1301 tcg_ctx.tb_count++;
1302 tcg_ctx.interm_time += profile_getclock() - ti;
1303 tcg_ctx.code_time -= profile_getclock();
1304 #endif
1306 /* ??? Overflow could be handled better here. In particular, we
1307 don't need to re-do gen_intermediate_code, nor should we re-do
1308 the tcg optimization currently hidden inside tcg_gen_code. All
1309 that should be required is to flush the TBs, allocate a new TB,
1310 re-initialize it per above, and re-do the actual code generation. */
1311 gen_code_size = tcg_gen_code(&tcg_ctx, tb);
1312 if (unlikely(gen_code_size < 0)) {
1313 goto buffer_overflow;
1315 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1316 if (unlikely(search_size < 0)) {
1317 goto buffer_overflow;
1320 #ifdef CONFIG_PROFILER
1321 tcg_ctx.code_time += profile_getclock();
1322 tcg_ctx.code_in_len += tb->size;
1323 tcg_ctx.code_out_len += gen_code_size;
1324 tcg_ctx.search_out_len += search_size;
1325 #endif
1327 #ifdef DEBUG_DISAS
1328 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1329 qemu_log_in_addr_range(tb->pc)) {
1330 qemu_log_lock();
1331 qemu_log("OUT: [size=%d]\n", gen_code_size);
1332 log_disas(tb->tc_ptr, gen_code_size);
1333 qemu_log("\n");
1334 qemu_log_flush();
1335 qemu_log_unlock();
1337 #endif
1339 tcg_ctx.code_gen_ptr = (void *)
1340 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1341 CODE_GEN_ALIGN);
1343 /* init jump list */
1344 assert(((uintptr_t)tb & 3) == 0);
1345 tb->jmp_list_first = (uintptr_t)tb | 2;
1346 tb->jmp_list_next[0] = (uintptr_t)NULL;
1347 tb->jmp_list_next[1] = (uintptr_t)NULL;
1349 /* init original jump addresses which have been set during tcg_gen_code() */
1350 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1351 tb_reset_jump(tb, 0);
1353 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1354 tb_reset_jump(tb, 1);
1357 /* check next page if needed */
1358 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1359 phys_page2 = -1;
1360 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1361 phys_page2 = get_page_addr_code(env, virt_page2);
1363 /* As long as consistency of the TB stuff is provided by tb_lock in user
1364 * mode and is implicit in single-threaded softmmu emulation, no explicit
1365 * memory barrier is required before tb_link_page() makes the TB visible
1366 * through the physical hash table and physical page list.
1368 tb_link_page(tb, phys_pc, phys_page2);
1369 return tb;
1373 * Invalidate all TBs which intersect with the target physical address range
1374 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1375 * 'is_cpu_write_access' should be true if called from a real cpu write
1376 * access: the virtual CPU will exit the current TB if code is modified inside
1377 * this TB.
1379 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
1380 * Called with tb_lock held for system-mode emulation
1382 static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
1384 while (start < end) {
1385 tb_invalidate_phys_page_range(start, end, 0);
1386 start &= TARGET_PAGE_MASK;
1387 start += TARGET_PAGE_SIZE;
1391 #ifdef CONFIG_SOFTMMU
1392 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1394 assert_tb_locked();
1395 tb_invalidate_phys_range_1(start, end);
1397 #else
1398 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1400 assert_memory_lock();
1401 tb_lock();
1402 tb_invalidate_phys_range_1(start, end);
1403 tb_unlock();
1405 #endif
1407 * Invalidate all TBs which intersect with the target physical address range
1408 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1409 * 'is_cpu_write_access' should be true if called from a real cpu write
1410 * access: the virtual CPU will exit the current TB if code is modified inside
1411 * this TB.
1413 * Called with tb_lock/mmap_lock held for user-mode emulation
1414 * Called with tb_lock held for system-mode emulation
1416 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1417 int is_cpu_write_access)
1419 TranslationBlock *tb, *tb_next;
1420 #if defined(TARGET_HAS_PRECISE_SMC)
1421 CPUState *cpu = current_cpu;
1422 CPUArchState *env = NULL;
1423 #endif
1424 tb_page_addr_t tb_start, tb_end;
1425 PageDesc *p;
1426 int n;
1427 #ifdef TARGET_HAS_PRECISE_SMC
1428 int current_tb_not_found = is_cpu_write_access;
1429 TranslationBlock *current_tb = NULL;
1430 int current_tb_modified = 0;
1431 target_ulong current_pc = 0;
1432 target_ulong current_cs_base = 0;
1433 uint32_t current_flags = 0;
1434 #endif /* TARGET_HAS_PRECISE_SMC */
1436 assert_memory_lock();
1437 assert_tb_locked();
1439 p = page_find(start >> TARGET_PAGE_BITS);
1440 if (!p) {
1441 return;
1443 #if defined(TARGET_HAS_PRECISE_SMC)
1444 if (cpu != NULL) {
1445 env = cpu->env_ptr;
1447 #endif
1449 /* we remove all the TBs in the range [start, end[ */
1450 /* XXX: see if in some cases it could be faster to invalidate all
1451 the code */
1452 tb = p->first_tb;
1453 while (tb != NULL) {
1454 n = (uintptr_t)tb & 3;
1455 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1456 tb_next = tb->page_next[n];
1457 /* NOTE: this is subtle as a TB may span two physical pages */
1458 if (n == 0) {
1459 /* NOTE: tb_end may be after the end of the page, but
1460 it is not a problem */
1461 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1462 tb_end = tb_start + tb->size;
1463 } else {
1464 tb_start = tb->page_addr[1];
1465 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1467 if (!(tb_end <= start || tb_start >= end)) {
1468 #ifdef TARGET_HAS_PRECISE_SMC
1469 if (current_tb_not_found) {
1470 current_tb_not_found = 0;
1471 current_tb = NULL;
1472 if (cpu->mem_io_pc) {
1473 /* now we have a real cpu fault */
1474 current_tb = tb_find_pc(cpu->mem_io_pc);
1477 if (current_tb == tb &&
1478 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1479 /* If we are modifying the current TB, we must stop
1480 its execution. We could be more precise by checking
1481 that the modification is after the current PC, but it
1482 would require a specialized function to partially
1483 restore the CPU state */
1485 current_tb_modified = 1;
1486 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1487 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1488 &current_flags);
1490 #endif /* TARGET_HAS_PRECISE_SMC */
1491 tb_phys_invalidate(tb, -1);
1493 tb = tb_next;
1495 #if !defined(CONFIG_USER_ONLY)
1496 /* if no code remaining, no need to continue to use slow writes */
1497 if (!p->first_tb) {
1498 invalidate_page_bitmap(p);
1499 tlb_unprotect_code(start);
1501 #endif
1502 #ifdef TARGET_HAS_PRECISE_SMC
1503 if (current_tb_modified) {
1504 /* we generate a block containing just the instruction
1505 modifying the memory. It will ensure that it cannot modify
1506 itself */
1507 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1508 cpu_loop_exit_noexc(cpu);
1510 #endif
1513 #ifdef CONFIG_SOFTMMU
1514 /* len must be <= 8 and start must be a multiple of len.
1515 * Called via softmmu_template.h when code areas are written to with
1516 * iothread mutex not held.
1518 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1520 PageDesc *p;
1522 #if 0
1523 if (1) {
1524 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1525 cpu_single_env->mem_io_vaddr, len,
1526 cpu_single_env->eip,
1527 cpu_single_env->eip +
1528 (intptr_t)cpu_single_env->segs[R_CS].base);
1530 #endif
1531 assert_memory_lock();
1533 p = page_find(start >> TARGET_PAGE_BITS);
1534 if (!p) {
1535 return;
1537 if (!p->code_bitmap &&
1538 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1539 /* build code bitmap. FIXME: writes should be protected by
1540 * tb_lock, reads by tb_lock or RCU.
1542 build_page_bitmap(p);
1544 if (p->code_bitmap) {
1545 unsigned int nr;
1546 unsigned long b;
1548 nr = start & ~TARGET_PAGE_MASK;
1549 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1550 if (b & ((1 << len) - 1)) {
1551 goto do_invalidate;
1553 } else {
1554 do_invalidate:
1555 tb_invalidate_phys_page_range(start, start + len, 1);
1558 #else
1559 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1560 * host PC of the faulting store instruction that caused this invalidate.
1561 * Returns true if the caller needs to abort execution of the current
1562 * TB (because it was modified by this store and the guest CPU has
1563 * precise-SMC semantics).
1565 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1567 TranslationBlock *tb;
1568 PageDesc *p;
1569 int n;
1570 #ifdef TARGET_HAS_PRECISE_SMC
1571 TranslationBlock *current_tb = NULL;
1572 CPUState *cpu = current_cpu;
1573 CPUArchState *env = NULL;
1574 int current_tb_modified = 0;
1575 target_ulong current_pc = 0;
1576 target_ulong current_cs_base = 0;
1577 uint32_t current_flags = 0;
1578 #endif
1580 assert_memory_lock();
1582 addr &= TARGET_PAGE_MASK;
1583 p = page_find(addr >> TARGET_PAGE_BITS);
1584 if (!p) {
1585 return false;
1588 tb_lock();
1589 tb = p->first_tb;
1590 #ifdef TARGET_HAS_PRECISE_SMC
1591 if (tb && pc != 0) {
1592 current_tb = tb_find_pc(pc);
1594 if (cpu != NULL) {
1595 env = cpu->env_ptr;
1597 #endif
1598 while (tb != NULL) {
1599 n = (uintptr_t)tb & 3;
1600 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1601 #ifdef TARGET_HAS_PRECISE_SMC
1602 if (current_tb == tb &&
1603 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1604 /* If we are modifying the current TB, we must stop
1605 its execution. We could be more precise by checking
1606 that the modification is after the current PC, but it
1607 would require a specialized function to partially
1608 restore the CPU state */
1610 current_tb_modified = 1;
1611 cpu_restore_state_from_tb(cpu, current_tb, pc);
1612 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1613 &current_flags);
1615 #endif /* TARGET_HAS_PRECISE_SMC */
1616 tb_phys_invalidate(tb, addr);
1617 tb = tb->page_next[n];
1619 p->first_tb = NULL;
1620 #ifdef TARGET_HAS_PRECISE_SMC
1621 if (current_tb_modified) {
1622 /* we generate a block containing just the instruction
1623 modifying the memory. It will ensure that it cannot modify
1624 itself */
1625 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1626 /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
1627 * back into the cpu_exec loop. */
1628 return true;
1630 #endif
1631 tb_unlock();
1633 return false;
1635 #endif
1637 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1638 tb[1].tc_ptr. Return NULL if not found */
1639 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1641 int m_min, m_max, m;
1642 uintptr_t v;
1643 TranslationBlock *tb;
1645 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1646 return NULL;
1648 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1649 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1650 return NULL;
1652 /* binary search (cf Knuth) */
1653 m_min = 0;
1654 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1655 while (m_min <= m_max) {
1656 m = (m_min + m_max) >> 1;
1657 tb = tcg_ctx.tb_ctx.tbs[m];
1658 v = (uintptr_t)tb->tc_ptr;
1659 if (v == tc_ptr) {
1660 return tb;
1661 } else if (tc_ptr < v) {
1662 m_max = m - 1;
1663 } else {
1664 m_min = m + 1;
1667 return tcg_ctx.tb_ctx.tbs[m_max];
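/* Rationale (explanatory comment, not from the original): tbs[] is filled in
 * code_gen_ptr order, so tc_ptr values are monotonically increasing; when the
 * loop exits without an exact match, m_max indexes the last TB whose tc_ptr
 * lies below the searched address, which is the TB containing it.
 */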
1670 #if !defined(CONFIG_USER_ONLY)
1671 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1673 ram_addr_t ram_addr;
1674 MemoryRegion *mr;
1675 hwaddr l = 1;
1677 rcu_read_lock();
1678 mr = address_space_translate(as, addr, &addr, &l, false);
1679 if (!(memory_region_is_ram(mr)
1680 || memory_region_is_romd(mr))) {
1681 rcu_read_unlock();
1682 return;
1684 ram_addr = memory_region_get_ram_addr(mr) + addr;
1685 tb_lock();
1686 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1687 tb_unlock();
1688 rcu_read_unlock();
1690 #endif /* !defined(CONFIG_USER_ONLY) */
1692 /* Called with tb_lock held. */
1693 void tb_check_watchpoint(CPUState *cpu)
1695 TranslationBlock *tb;
1697 tb = tb_find_pc(cpu->mem_io_pc);
1698 if (tb) {
1699 /* We can use retranslation to find the PC. */
1700 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1701 tb_phys_invalidate(tb, -1);
1702 } else {
1703 /* The exception probably happened in a helper. The CPU state should
1704 have been saved before calling it. Fetch the PC from there. */
1705 CPUArchState *env = cpu->env_ptr;
1706 target_ulong pc, cs_base;
1707 tb_page_addr_t addr;
1708 uint32_t flags;
1710 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1711 addr = get_page_addr_code(env, pc);
1712 tb_invalidate_phys_range(addr, addr + 1);
1716 #ifndef CONFIG_USER_ONLY
1717 /* in deterministic execution mode, instructions doing device I/Os
1718 * must be at the end of the TB.
1720 * Called by softmmu_template.h, with iothread mutex not held.
1722 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1724 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
1725 CPUArchState *env = cpu->env_ptr;
1726 #endif
1727 TranslationBlock *tb;
1728 uint32_t n, cflags;
1729 target_ulong pc, cs_base;
1730 uint32_t flags;
1732 tb_lock();
1733 tb = tb_find_pc(retaddr);
1734 if (!tb) {
1735 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1736 (void *)retaddr);
1738 n = cpu->icount_decr.u16.low + tb->icount;
1739 cpu_restore_state_from_tb(cpu, tb, retaddr);
1740 /* Calculate how many instructions had been executed before the fault
1741 occurred. */
1742 n = n - cpu->icount_decr.u16.low;
1743 /* Generate a new TB ending on the I/O insn. */
1744 n++;
1745 /* On MIPS and SH, delay slot instructions can only be restarted if
1746 they were already the first instruction in the TB. If this is not
1747 the first instruction in a TB then re-execute the preceding
1748 branch. */
1749 #if defined(TARGET_MIPS)
1750 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1751 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1752 cpu->icount_decr.u16.low++;
1753 env->hflags &= ~MIPS_HFLAG_BMASK;
1755 #elif defined(TARGET_SH4)
1756 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1757 && n > 1) {
1758 env->pc -= 2;
1759 cpu->icount_decr.u16.low++;
1760 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1762 #endif
1763 /* This should never happen. */
1764 if (n > CF_COUNT_MASK) {
1765 cpu_abort(cpu, "TB too big during recompile");
1768 cflags = n | CF_LAST_IO;
1769 pc = tb->pc;
1770 cs_base = tb->cs_base;
1771 flags = tb->flags;
1772 tb_phys_invalidate(tb, -1);
1773 if (tb->cflags & CF_NOCACHE) {
1774 if (tb->orig_tb) {
1775 /* Invalidate original TB if this TB was generated in
1776 * cpu_exec_nocache() */
1777 tb_phys_invalidate(tb->orig_tb, -1);
1779 tb_free(tb);
1781 /* FIXME: In theory this could raise an exception. In practice
1782 we have already translated the block once so it's probably ok. */
1783 tb_gen_code(cpu, pc, cs_base, flags, cflags);
1785 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1786 * the first in the TB) then we end up generating a whole new TB and
1787 * repeating the fault, which is horribly inefficient.
1788 * Better would be to execute just this insn uncached, or generate a
1789 * second new TB.
1791 * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
1792 * tb_lock gets reset.
1794 cpu_loop_exit_noexc(cpu);
1797 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
1799 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
1801 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
1802 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
1806 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1808 /* Discard jump cache entries for any tb which might potentially
1809 overlap the flushed page. */
1810 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
1811 tb_jmp_cache_clear_page(cpu, addr);
1814 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1815 struct qht_stats hst)
1817 uint32_t hgram_opts;
1818 size_t hgram_bins;
1819 char *hgram;
1821 if (!hst.head_buckets) {
1822 return;
1824 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1825 hst.used_head_buckets, hst.head_buckets,
1826 (double)hst.used_head_buckets / hst.head_buckets * 100);
1828 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1829 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1830 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1831 hgram_opts |= QDIST_PR_NODECIMAL;
1833 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1834 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1835 qdist_avg(&hst.occupancy) * 100, hgram);
1836 g_free(hgram);
1838 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1839 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1840 if (hgram_bins > 10) {
1841 hgram_bins = 10;
1842 } else {
1843 hgram_bins = 0;
1844 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1846 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1847 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1848 qdist_avg(&hst.chain), hgram);
1849 g_free(hgram);
1852 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1854 int i, target_code_size, max_target_code_size;
1855 int direct_jmp_count, direct_jmp2_count, cross_page;
1856 TranslationBlock *tb;
1857 struct qht_stats hst;
1859 tb_lock();
1861 target_code_size = 0;
1862 max_target_code_size = 0;
1863 cross_page = 0;
1864 direct_jmp_count = 0;
1865 direct_jmp2_count = 0;
1866 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1867 tb = tcg_ctx.tb_ctx.tbs[i];
1868 target_code_size += tb->size;
1869 if (tb->size > max_target_code_size) {
1870 max_target_code_size = tb->size;
1872 if (tb->page_addr[1] != -1) {
1873 cross_page++;
1875 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1876 direct_jmp_count++;
1877 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1878 direct_jmp2_count++;
1882 /* XXX: avoid using doubles ? */
1883 cpu_fprintf(f, "Translation buffer state:\n");
1884 cpu_fprintf(f, "gen code size %td/%zd\n",
1885 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1886 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
1887 cpu_fprintf(f, "TB count %d\n", tcg_ctx.tb_ctx.nb_tbs);
1888 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1889 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1890 tcg_ctx.tb_ctx.nb_tbs : 0,
1891 max_target_code_size);
1892 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1893 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1894 tcg_ctx.code_gen_buffer) /
1895 tcg_ctx.tb_ctx.nb_tbs : 0,
1896 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1897 tcg_ctx.code_gen_buffer) /
1898 target_code_size : 0);
1899 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1900 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1901 tcg_ctx.tb_ctx.nb_tbs : 0);
1902 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1903 direct_jmp_count,
1904 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1905 tcg_ctx.tb_ctx.nb_tbs : 0,
1906 direct_jmp2_count,
1907 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1908 tcg_ctx.tb_ctx.nb_tbs : 0);
1910 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
1911 print_qht_statistics(f, cpu_fprintf, hst);
1912 qht_statistics_destroy(&hst);
1914 cpu_fprintf(f, "\nStatistics:\n");
1915 cpu_fprintf(f, "TB flush count %u\n",
1916 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
1917 cpu_fprintf(f, "TB invalidate count %d\n",
1918 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1919 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1920 tcg_dump_info(f, cpu_fprintf);
1922 tb_unlock();
1925 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1927 tcg_dump_op_count(f, cpu_fprintf);
1930 #else /* CONFIG_USER_ONLY */
1932 void cpu_interrupt(CPUState *cpu, int mask)
1934 g_assert(qemu_mutex_iothread_locked());
1935 cpu->interrupt_request |= mask;
1936 cpu->icount_decr.u16.high = -1;
1940 * Walks guest process memory "regions" one by one
1941 * and calls callback function 'fn' for each region.
1943 struct walk_memory_regions_data {
1944 walk_memory_regions_fn fn;
1945 void *priv;
1946 target_ulong start;
1947 int prot;
1950 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1951 target_ulong end, int new_prot)
1953 if (data->start != -1u) {
1954 int rc = data->fn(data->priv, data->start, end, data->prot);
1955 if (rc != 0) {
1956 return rc;
1960 data->start = (new_prot ? end : -1u);
1961 data->prot = new_prot;
1963 return 0;
1966 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1967 target_ulong base, int level, void **lp)
1969 target_ulong pa;
1970 int i, rc;
1972 if (*lp == NULL) {
1973 return walk_memory_regions_end(data, base, 0);
1976 if (level == 0) {
1977 PageDesc *pd = *lp;
1979 for (i = 0; i < V_L2_SIZE; ++i) {
1980 int prot = pd[i].flags;
1982 pa = base | (i << TARGET_PAGE_BITS);
1983 if (prot != data->prot) {
1984 rc = walk_memory_regions_end(data, pa, prot);
1985 if (rc != 0) {
1986 return rc;
1990 } else {
1991 void **pp = *lp;
1993 for (i = 0; i < V_L2_SIZE; ++i) {
1994 pa = base | ((target_ulong)i <<
1995 (TARGET_PAGE_BITS + V_L2_BITS * level));
1996 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1997 if (rc != 0) {
1998 return rc;
2003 return 0;
2006 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2008 struct walk_memory_regions_data data;
2009 uintptr_t i, l1_sz = v_l1_size;
2011 data.fn = fn;
2012 data.priv = priv;
2013 data.start = -1u;
2014 data.prot = 0;
2016 for (i = 0; i < l1_sz; i++) {
2017 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2018 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2019 if (rc != 0) {
2020 return rc;
2024 return walk_memory_regions_end(&data, 0, 0);
2027 static int dump_region(void *priv, target_ulong start,
2028 target_ulong end, unsigned long prot)
2030 FILE *f = (FILE *)priv;
2032 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2033 " "TARGET_FMT_lx" %c%c%c\n",
2034 start, end, end - start,
2035 ((prot & PAGE_READ) ? 'r' : '-'),
2036 ((prot & PAGE_WRITE) ? 'w' : '-'),
2037 ((prot & PAGE_EXEC) ? 'x' : '-'));
2039 return 0;
2042 /* dump memory mappings */
2043 void page_dump(FILE *f)
2045 const int length = sizeof(target_ulong) * 2;
2046 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2047 length, "start", length, "end", length, "size", "prot");
2048 walk_memory_regions(f, dump_region);
2051 int page_get_flags(target_ulong address)
2053 PageDesc *p;
2055 p = page_find(address >> TARGET_PAGE_BITS);
2056 if (!p) {
2057 return 0;
2059 return p->flags;
2062 /* Modify the flags of a page and invalidate the code if necessary.
2063 The flag PAGE_WRITE_ORG is positioned automatically depending
2064 on PAGE_WRITE. The mmap_lock should already be held. */
2065 void page_set_flags(target_ulong start, target_ulong end, int flags)
2067 target_ulong addr, len;
2069 /* This function should never be called with addresses outside the
2070 guest address space. If this assert fires, it probably indicates
2071 a missing call to h2g_valid. */
2072 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2073 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2074 #endif
2075 assert(start < end);
2076 assert_memory_lock();
2078 start = start & TARGET_PAGE_MASK;
2079 end = TARGET_PAGE_ALIGN(end);
2081 if (flags & PAGE_WRITE) {
2082 flags |= PAGE_WRITE_ORG;
2085 for (addr = start, len = end - start;
2086 len != 0;
2087 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2088 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2090 /* If the write protection bit is set, then we invalidate
2091 the code inside. */
2092 if (!(p->flags & PAGE_WRITE) &&
2093 (flags & PAGE_WRITE) &&
2094 p->first_tb) {
2095 tb_invalidate_phys_page(addr, 0);
2097 p->flags = flags;
2101 int page_check_range(target_ulong start, target_ulong len, int flags)
2103 PageDesc *p;
2104 target_ulong end;
2105 target_ulong addr;
2107 /* This function should never be called with addresses outside the
2108 guest address space. If this assert fires, it probably indicates
2109 a missing call to h2g_valid. */
2110 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2111 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2112 #endif
2114 if (len == 0) {
2115 return 0;
2117 if (start + len - 1 < start) {
2118 /* We've wrapped around. */
2119 return -1;
2122 /* must do this before we lose bits in the next step */
2123 end = TARGET_PAGE_ALIGN(start + len);
2124 start = start & TARGET_PAGE_MASK;
2126 for (addr = start, len = end - start;
2127 len != 0;
2128 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2129 p = page_find(addr >> TARGET_PAGE_BITS);
2130 if (!p) {
2131 return -1;
2133 if (!(p->flags & PAGE_VALID)) {
2134 return -1;
2137 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2138 return -1;
2140 if (flags & PAGE_WRITE) {
2141 if (!(p->flags & PAGE_WRITE_ORG)) {
2142 return -1;
2144 /* unprotect the page if it was put read-only because it
2145 contains translated code */
2146 if (!(p->flags & PAGE_WRITE)) {
2147 if (!page_unprotect(addr, 0)) {
2148 return -1;
2153 return 0;
2156 /* called from signal handler: invalidate the code and unprotect the
2157 * page. Return 0 if the fault was not handled, 1 if it was handled,
2158 * and 2 if it was handled but the caller must cause the TB to be
2159 * immediately exited. (We can only return 2 if the 'pc' argument is
2160 * non-zero.)
2162 int page_unprotect(target_ulong address, uintptr_t pc)
2164 unsigned int prot;
2165 bool current_tb_invalidated;
2166 PageDesc *p;
2167 target_ulong host_start, host_end, addr;
2169 /* Technically this isn't safe inside a signal handler. However we
2170 know this only ever happens in a synchronous SEGV handler, so in
2171 practice it seems to be ok. */
2172 mmap_lock();
2174 p = page_find(address >> TARGET_PAGE_BITS);
2175 if (!p) {
2176 mmap_unlock();
2177 return 0;
2180 /* if the page was really writable, then we change its
2181 protection back to writable */
2182 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2183 host_start = address & qemu_host_page_mask;
2184 host_end = host_start + qemu_host_page_size;
2186 prot = 0;
2187 current_tb_invalidated = false;
2188 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2189 p = page_find(addr >> TARGET_PAGE_BITS);
2190 p->flags |= PAGE_WRITE;
2191 prot |= p->flags;
2193 /* and since the content will be modified, we must invalidate
2194 the corresponding translated code. */
2195 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2196 #ifdef DEBUG_TB_CHECK
2197 tb_invalidate_check(addr);
2198 #endif
2200 mprotect((void *)g2h(host_start), qemu_host_page_size,
2201 prot & PAGE_BITS);
2203 mmap_unlock();
2204 /* If current TB was invalidated return to main loop */
2205 return current_tb_invalidated ? 2 : 1;
2207 mmap_unlock();
2208 return 0;
2210 #endif /* CONFIG_USER_ONLY */
2212 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
2213 void tcg_flush_softmmu_tlb(CPUState *cs)
2215 #ifdef CONFIG_SOFTMMU
2216 tlb_flush(cs);
2217 #endif