/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)
/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
}
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}
static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(desc, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
    }
}
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
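
/*
 * Worked example of the heuristic above (illustrative numbers, not taken
 * from a real trace): with old_size == 1024 entries, a window maximum of 800
 * used entries gives a use rate of 800 * 100 / 1024 ~= 78%, which is above
 * 70%, so the next flush doubles the table to 2048 entries. Conversely, a
 * window maximum of 200 entries gives ~19%; once the 100 ms window expires
 * the table is shrunk towards pow2ceil(200) == 256, but the expected rate
 * 200 * 100 / 256 ~= 78% is above 70%, so the code below doubles that
 * ceiling to 512 for an expected rate of ~39%, inside the 30-70% band.
 */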
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env_tlb(env)->f[mmu_idx].table);
    g_free(env_tlb(env)->d[mmu_idx].iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env_tlb(env)->f[mmu_idx].table == NULL ||
           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env_tlb(env)->f[mmu_idx].table);
        g_free(env_tlb(env)->d[mmu_idx].iotlb);
        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
}
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}
/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
    env_tlb(env)->d[mmu_idx].vindex = 0;
    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
           sizeof(env_tlb(env)->d[0].vtable));
}
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}
void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}
void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}
void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}
void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}
/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}
/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}
/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}
/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
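
/*
 * Illustration (not a literal trace): the TCG fast path compares the access
 * address against addr_write expecting the low bits below the page to be
 * clear. Setting a flag such as TLB_NOTDIRTY in those low bits makes the
 * comparison fail, so the access is diverted to the store helpers below,
 * which handle the dirty-page bookkeeping (see tlb_set_dirty() further down).
 */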
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}
/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, int size)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}
static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use atomic_read */
    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}
/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
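
/*
 * For example, VICTIM_TLB_HIT(addr_code, addr) as used in
 * get_page_addr_code() below expands to a probe of the victim TLB keyed on
 * the addr_code comparator; it relies on env, mmu_idx and index already
 * being in scope at the call site.
 */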
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}
*env
, abi_ptr addr
,
1077 MMUAccessType access_type
, int mmu_idx
)
1079 CPUTLBEntry
*entry
= tlb_entry(env
, mmu_idx
, addr
);
1080 uintptr_t tlb_addr
, page
;
1083 switch (access_type
) {
1085 elt_ofs
= offsetof(CPUTLBEntry
, addr_read
);
1087 case MMU_DATA_STORE
:
1088 elt_ofs
= offsetof(CPUTLBEntry
, addr_write
);
1090 case MMU_INST_FETCH
:
1091 elt_ofs
= offsetof(CPUTLBEntry
, addr_code
);
1094 g_assert_not_reached();
1097 page
= addr
& TARGET_PAGE_MASK
;
1098 tlb_addr
= tlb_read_ofs(entry
, elt_ofs
);
1100 if (!tlb_hit_page(tlb_addr
, page
)) {
1101 uintptr_t index
= tlb_index(env
, mmu_idx
, addr
);
1103 if (!victim_tlb_hit(env
, mmu_idx
, index
, elt_ofs
, page
)) {
1104 CPUState
*cs
= env_cpu(env
);
1105 CPUClass
*cc
= CPU_GET_CLASS(cs
);
1107 if (!cc
->tlb_fill(cs
, addr
, 0, access_type
, mmu_idx
, true, 0)) {
1108 /* Non-faulting page table read failed. */
1112 /* TLB resize via tlb_fill may have moved the entry. */
1113 entry
= tlb_entry(env
, mmu_idx
, addr
);
1115 tlb_addr
= tlb_read_ofs(entry
, elt_ofs
);
1118 if (tlb_addr
& ~TARGET_PAGE_MASK
) {
1123 return (void *)((uintptr_t)addr
+ entry
->addend
);
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
#ifdef TARGET_WORDS_BIGENDIAN
#define NEED_BE_BSWAP 0
#define NEED_LE_BSWAP 1
#else
#define NEED_BE_BSWAP 1
#define NEED_LE_BSWAP 0
#endif

/*
 * Byte Swap Helper
 *
 * This should all dead code away depending on the build host and
 * access type.
 */

static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
{
    if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
        switch (size) {
        case 1: return val;
        case 2: return bswap16(val);
        case 4: return bswap32(val);
        case 8: return bswap64(val);
        default:
            g_assert_not_reached();
        }
    } else {
        return val;
    }
}
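
/*
 * For instance, when TARGET_WORDS_BIGENDIAN is set, NEED_LE_BSWAP is 1: a
 * 4-byte little-endian access then goes through bswap32(), while big-endian
 * accesses pass through unchanged; since the conditions are compile-time
 * constants, the compiler can drop the unused branches entirely.
 */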
/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */

typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
static inline uint64_t __attribute__((always_inline))
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        if (tlb_addr & TLB_RECHECK) {
            /*
             * This is a TLB_RECHECK access, where the MMU protection
             * covers a smaller range than a target page, and we must
             * repeat the MMU check here. This tlb_fill() call might
             * longjump out if this access should cause a guest exception.
             */
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            tlb_addr = code_read ? entry->addr_code : entry->addr_read;
            tlb_addr &= ~TLB_RECHECK;
            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
                /* RAM access */
                goto do_aligned_access;
            }
        }

        res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
                       mmu_idx, addr, retaddr, access_type, size);
        return handle_bswap(res, size, big_endian);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (big_endian) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.  */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

 do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
    switch (size) {
    case 1:
        res = ldub_p(haddr);
        break;
    case 2:
        if (big_endian) {
            res = lduw_be_p(haddr);
        } else {
            res = lduw_le_p(haddr);
        }
        break;
    case 4:
        if (big_endian) {
            res = (uint32_t)ldl_be_p(haddr);
        } else {
            res = (uint32_t)ldl_le_p(haddr);
        }
        break;
    case 8:
        if (big_endian) {
            res = ldq_be_p(haddr);
        } else {
            res = ldq_le_p(haddr);
        }
        break;
    default:
        g_assert_not_reached();
    }

    return res;
}
/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 1, false, false,
                       full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, false, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, true, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, false, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, true, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, false, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, true, false,
                       helper_be_ldq_mmu);
}
/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */


tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
static inline void __attribute__((always_inline))
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        if (tlb_addr & TLB_RECHECK) {
            /*
             * This is a TLB_RECHECK access, where the MMU protection
             * covers a smaller range than a target page, and we must
             * repeat the MMU check here. This tlb_fill() call might
             * longjump out if this access should cause a guest exception.
             */
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            tlb_addr = tlb_addr_write(entry);
            tlb_addr &= ~TLB_RECHECK;
            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
                /* RAM access */
                goto do_aligned_access;
            }
        }

        io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
                  handle_bswap(val, size, big_endian),
                  addr, retaddr, size);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
        uintptr_t index2;
        CPUTLBEntry *entry2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /*
         * Ensure the second page is in the TLB.  Note that the first page
         * is already guaranteed to be filled, and that the second page
         * cannot evict the first.
         */
        page2 = (addr + size) & TARGET_PAGE_MASK;
        index2 = tlb_index(env, mmu_idx, page2);
        entry2 = tlb_entry(env, mmu_idx, page2);
        tlb_addr2 = tlb_addr_write(entry2);
        if (!tlb_hit_page(tlb_addr2, page2)
            && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
                               page2 & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /*
         * XXX: not efficient, but simple.
         * This loop must go in the forward direction to avoid issues
         * with self-modifying code in Windows 64-bit.
         */
        for (i = 0; i < size; ++i) {
            uint8_t val8;
            if (big_endian) {
                /* Big-endian extract.  */
                val8 = val >> (((size - 1) * 8) - (i * 8));
            } else {
                /* Little-endian extract.  */
                val8 = val >> (i * 8);
            }
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
        return;
    }

 do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
    switch (size) {
    case 1:
        stb_p(haddr, val);
        break;
    case 2:
        if (big_endian) {
            stw_be_p(haddr, val);
        } else {
            stw_le_p(haddr, val);
        }
        break;
    case 4:
        if (big_endian) {
            stl_be_p(haddr, val);
        } else {
            stl_le_p(haddr, val);
        }
        break;
    case 8:
        if (big_endian) {
            stq_be_p(haddr, val);
        } else {
            stq_le_p(haddr, val);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 1, false);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 2, false);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 2, true);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 4, false);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 4, true);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 8, false);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, 8, true);
}
/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)
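
/*
 * As an illustration, with DATA_SIZE == 4 the template below defines SUFFIX
 * as "l" and END as "_le"/"_be", so ATOMIC_NAME(cmpxchg) expands to the
 * helper_atomic_cmpxchgl_le_mmu / helper_atomic_cmpxchgl_be_mmu entry points
 * (the exact names come from atomic_template.h, not from this file).
 */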

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
/* Code access functions.  */

static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 1, false, true,
                       full_ldub_cmmu);
}

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, false, true,
                       full_le_lduw_cmmu);
}

uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 2, true, true,
                       full_be_lduw_cmmu);
}

uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, false, true,
                       full_le_ldul_cmmu);
}

uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 4, true, true,
                       full_be_ldul_cmmu);
}

uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_cmmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, false, true,
                       helper_le_ldq_cmmu);
}

uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, 8, true, true,
                       helper_be_ldq_cmmu);
}