/*
 * Internal execution defines for qemu (target specific)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_TARGET_H
#define ACCEL_TCG_INTERNAL_TARGET_H

#include "exec/exec-all.h"
#include "exec/translate-all.h"

/*
 * Access to the various translation structures needs to be serialised
 * via locks for consistency.  In user-mode emulation, access to the
 * memory-related structures is protected by the mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif
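
/*
 * Usage sketch (illustrative): code that modifies the translation
 * structures asserts the lock first, so a missing lock trips the
 * assertion rather than corrupting state silently:
 *
 *     assert_memory_lock();
 *     ... modify the TB lookup structures ...
 */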

#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif

#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif

#ifdef CONFIG_USER_ONLY

/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
static inline void tb_lock_page0(tb_page_addr_t p0)
{
    page_protect(p0);
}

static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    page_protect(p1);
}

static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
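
/*
 * Locking sketch (illustrative): a TB locks its first page, adds the
 * second page if it crosses a page boundary, and releases both once
 * registration is complete.  "spans_two_pages" is a hypothetical
 * condition standing in for the real page-crossing check:
 *
 *     tb_lock_page0(p0);
 *     if (spans_two_pages) {
 *         tb_lock_page1(p0, p1);
 *     }
 *     ... link the TB into the page lists ...
 *     tb_unlock_pages(tb);
 */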

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);

/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PCREL) {
        return cpu->cc->get_pc(cpu);
    } else {
        return tb->pc;
    }
}
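
/*
 * Example (illustrative): when tracing, the PC reported for a CF_PCREL
 * translation must come from the CPU state, since such TBs do not cache
 * an absolute PC:
 *
 *     qemu_log("pc=0x%" VADDR_PRIx "\n", log_pc(cpu, tb));
 */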

/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering.  A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif
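
/*
 * Worked example (hypothetical values): for a guest that requires full
 * ordering (TCG_GUEST_DEFAULT_MO == TCG_MO_ALL) on a host that already
 * preserves load-load and store-store order
 * (TCG_TARGET_DEFAULT_MO == TCG_MO_LD_LD | TCG_MO_ST_ST):
 *
 *     tcg_req_mo(TCG_MO_ALL)
 *         == TCG_MO_ALL & ~(TCG_MO_LD_LD | TCG_MO_ST_ST)
 *         == TCG_MO_LD_ST | TCG_MO_ST_LD
 *
 * i.e. only the orderings the host does not already guarantee remain.
 */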

/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
#define cpu_req_mo(type)              \
    do {                              \
        if (tcg_req_mo(type)) {       \
            smp_mb();                 \
        }                             \
    } while (0)
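
/*
 * Usage sketch (illustrative): a guest load helper orders itself against
 * earlier guest accesses before reading memory; on hosts that already
 * provide the required ordering the macro compiles away entirely:
 *
 *     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
 *     ... perform the guest load ...
 */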

#endif /* ACCEL_TCG_INTERNAL_TARGET_H */