/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/mmu-access-type.h"
#include "exec/tlb-common.h"
#include "exec/vaddr.h"
#include "qemu/error-report.h"
#include "exec/helper-proto-common.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "internal-common.h"
#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
#include "tcg/oversized-guest.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * vaddr even on 32 bit builds
 */
QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
                                    MMUAccessType access_type)
{
    /* Do not rearrange the CPUTLBEntry structure members. */
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
                      MMU_DATA_LOAD * sizeof(uint64_t));
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
                      MMU_DATA_STORE * sizeof(uint64_t));
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
                      MMU_INST_FETCH * sizeof(uint64_t));

#if TARGET_LONG_BITS == 32
    /* Use qatomic_read, in case of addr_write; only care about low bits. */
    const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
    ptr += HOST_BIG_ENDIAN;
    return qatomic_read(ptr);
#else
    const uint64_t *ptr = &entry->addr_idx[access_type];
# if TCG_OVERSIZED_GUEST
    return *ptr;
# else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read(ptr);
# endif
#endif
}

static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
{
    return tlb_read_idx(entry, MMU_DATA_STORE);
}

/* Find the TLB index corresponding to the mmu_idx + address pair. */
static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
                                  vaddr addr)
{
    uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;

    return (addr >> TARGET_PAGE_BITS) & size_mask;
}

/* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
                                     vaddr addr)
{
    return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
}
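/*
 * Illustrative example (numbers assumed, not taken from the source): with
 * TARGET_PAGE_BITS == 12 and a 256-entry dynamic table, size_mask == 0xff,
 * so addr 0x7f001234 maps to index (0x7f001234 >> 12) & 0xff == 0x01.
 * The TLB is direct mapped: every address whose page number shares the same
 * low 8 bits competes for that one slot.
 */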
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    int i, i0;

    i0 = tb_jmp_cache_hash_page(page_addr);
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}
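/*
 * Worked example (illustrative numbers, not part of the original source):
 * with old_size == 1024 and window_max_entries == 820, rate == 80 > 70, so
 * the table doubles to 2048 (capped at 1 << CPU_TLB_DYN_MAX_BITS).  If
 * instead window_max_entries == 200 and the 100 ms window has expired,
 * rate == 19 < 30, pow2ceil(200) == 256, expected_rate == 78 > 70, so ceil
 * doubles to 512 and the new size is MAX(512, 1 << CPU_TLB_DYN_MIN_BITS).
 */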
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
    CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
{
    cpu->neg.tlb.d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
{
    cpu->neg.tlb.d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&cpu->neg.tlb.c.lock);

    /* All tlbs are initialized flushed. */
    cpu->neg.tlb.c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
    }
}
void tlb_destroy(CPUState *cpu)
{
    int i;

    qemu_spin_destroy(&cpu->neg.tlb.c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
        CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);

    all_dirty = cpu->neg.tlb.c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    cpu->neg.tlb.c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
    }

    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&cpu->neg.tlb.c.full_flush_count,
                    cpu->neg.tlb.c.full_flush_count + 1);
    } else {
        qatomic_set(&cpu->neg.tlb.c.part_flush_count,
                    cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
                        cpu->neg.tlb.c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    assert_cpu_is_self(cpu);

    tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      vaddr page, vaddr mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}
/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        vaddr page, vaddr mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
                                            vaddr page, vaddr mask)
{
    CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
    int k;

    assert_cpu_is_self(cpu);
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(cpu, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
                                              vaddr page)
{
    tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
{
    vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
    vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;

    /* Check if we need to flush due to large pages. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d (%016"
                  VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
            tlb_n_used_entries_dec(cpu, midx);
        }
        tlb_flush_vtlb_page_locked(cpu, midx, page);
    }
}
/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             vaddr addr,
                                             uint16_t idxmap)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(cpu, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    vaddr addr_and_idxmap = data.target_ptr;
    vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    vaddr addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);

    assert_cpu_is_self(cpu);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

void tlb_flush_page(CPUState *cpu, vaddr addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              vaddr addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
static void tlb_flush_range_locked(CPUState *cpu, int midx,
                                   vaddr addr, vaddr len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
    CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
    vaddr mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
        return;
    }

    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
        vaddr page = addr + i;
        CPUTLBEntry *entry = tlb_entry(cpu, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(cpu, midx);
        }
        tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
    }
}
typedef struct {
    vaddr addr;
    vaddr len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    assert_cpu_is_self(cpu);

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    tlb_flush_range_by_mmuidx_async_0(cpu, d);
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu. */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   vaddr addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
                                             TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TARGET_LONG_BITS == 32
            uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
            ptr_write += HOST_BIG_ENDIAN;
            qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
#elif TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    int mmu_idx;

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         vaddr addr)
{
    if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = addr;
    }
}
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static void tlb_set_dirty(CPUState *cpu, vaddr addr)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    addr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
                               vaddr addr, uint64_t size)
{
    vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
    vaddr lp_mask = ~(size - 1);

    if (lp_addr == (vaddr)-1) {
        /* No previous large page. */
        lp_addr = addr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
        while (((lp_addr ^ addr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
}

static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
                                   vaddr address, int flags,
                                   MMUAccessType access_type, bool enable)
{
    if (enable) {
        address |= flags & TLB_FLAGS_MASK;
        flags &= TLB_SLOW_FLAGS_MASK;
        if (flags) {
            address |= TLB_FORCE_SLOW;
        }
    } else {
        address = -1;
        flags = 0;
    }
    ent->addr_idx[access_type] = address;
    full->slow_flags[access_type] = flags;
}
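/*
 * Illustrative note (not from the original source): flags that fit in the
 * comparator word (TLB_FLAGS_MASK) are folded directly into the address,
 * while flags in TLB_SLOW_FLAGS_MASK, such as TLB_WATCHPOINT, are kept in
 * full->slow_flags[] and only TLB_FORCE_SLOW is set in the comparator, so
 * the fast path still takes the slow route when needed.
 */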
/*
 * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
                       vaddr addr, CPUTLBEntryFull *full)
{
    CPUTLB *tlb = &cpu->neg.tlb;
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index, read_flags, write_flags;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    vaddr addr_page;
    int asidx, wp_flags, prot;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (full->lg_page_size <= TARGET_PAGE_BITS) {
        sz = TARGET_PAGE_SIZE;
    } else {
        sz = (hwaddr)1 << full->lg_page_size;
        tlb_add_large_page(cpu, mmu_idx, addr, sz);
    }
    addr_page = addr & TARGET_PAGE_MASK;
    paddr_page = full->phys_addr & TARGET_PAGE_MASK;

    prot = full->prot;
    asidx = cpu_asidx_from_attrs(cpu, full->attrs);
    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, full->attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
              " prot=%x idx=%d\n",
              addr, full->phys_addr, prot, mmu_idx);

    read_flags = full->tlb_fill_flags;
    if (full->lg_page_size < TARGET_PAGE_BITS) {
        /* Repeat the MMU check and TLB fill on every access. */
        read_flags |= TLB_INVALID_MASK;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_flags = read_flags;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        assert(!(iotlb & ~TARGET_PAGE_MASK));
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_flags |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_flags |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_flags |= TLB_MMIO;
        if (!is_romd) {
            read_flags = write_flags;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(cpu, mmu_idx, addr_page);
    te = tlb_entry(cpu, mmu_idx, addr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean. */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page. */
    tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb. */
        copy_tlb_helper_locked(tv, te);
        desc->vfulltlb[vidx] = desc->fulltlb[index];
        tlb_n_used_entries_dec(cpu, mmu_idx);
    }

    /* refill the tlb */
    /*
     * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
     * aligned ram_addr_t of the page base of the target RAM.
     * Otherwise, iotlb contains
     *  - a physical section number in the lower TARGET_PAGE_BITS
     *  - the offset within section->mr of the page base (I/O, ROMD) with the
     *    TARGET_PAGE_BITS masked off.
     * We subtract addr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_prepare()/get_page_addr_code().
     */
    desc->fulltlb[index] = *full;
    full = &desc->fulltlb[index];
    full->xlat_section = iotlb - addr_page;
    full->phys_addr = paddr_page;

    /* Now calculate the new entry */
    tn.addend = addend - addr_page;

    tlb_set_compare(full, &tn, addr_page, read_flags,
                    MMU_INST_FETCH, prot & PAGE_EXEC);

    if (wp_flags & BP_MEM_READ) {
        read_flags |= TLB_WATCHPOINT;
    }
    tlb_set_compare(full, &tn, addr_page, read_flags,
                    MMU_DATA_LOAD, prot & PAGE_READ);

    if (prot & PAGE_WRITE_INV) {
        write_flags |= TLB_INVALID_MASK;
    }
    if (wp_flags & BP_MEM_WRITE) {
        write_flags |= TLB_WATCHPOINT;
    }
    tlb_set_compare(full, &tn, addr_page, write_flags,
                    MMU_DATA_STORE, prot & PAGE_WRITE);

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(cpu, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
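/*
 * Illustrative example (made-up numbers, not from the source): the stored
 * addend is host_page - guest_page, so a guest page 0x400000 backed by host
 * memory at 0x7f0012340000 yields addend 0x7f0011f40000, and an access to
 * guest vaddr 0x400123 resolves to host 0x400123 + addend = 0x7f0012340123.
 */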
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, uint64_t size)
{
    CPUTLBEntryFull full = {
        .phys_addr = paddr,
        .attrs = attrs,
        .prot = prot,
        .lg_page_size = ctz64(size)
    };

    assert(is_power_of_2(size));
    tlb_set_page_full(cpu, mmu_idx, addr, &full);
}

void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, uint64_t size)
{
    tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

/*
 * Note: tlb_fill_align() can trigger a resize of the TLB.
 * This means that all of the caller's prior references to the TLB table
 * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
 * (e.g. via tlb_entry()).
 */
static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
                           int mmu_idx, MemOp memop, int size,
                           bool probe, uintptr_t ra)
{
    const TCGCPUOps *ops = cpu->cc->tcg_ops;
    CPUTLBEntryFull full;

    if (ops->tlb_fill_align) {
        if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
                                memop, size, probe, ra)) {
            tlb_set_page_full(cpu, mmu_idx, addr, &full);
            return true;
        }
    } else {
        /* Legacy behaviour is alignment before paging. */
        if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
            ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
        }
        if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
            return true;
        }
    }
    assert(probe);
    return false;
}
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
                                          mmu_idx, retaddr);
}

static MemoryRegionSection *
io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
           MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
{
    MemoryRegionSection *section;
    hwaddr mr_offset;

    section = iotlb_to_section(cpu, xlat, attrs);
    mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->neg.can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    *out_offset = mr_offset;
    return section;
}

static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
                      unsigned size, MMUAccessType access_type, int mmu_idx,
                      MemTxResult response, uintptr_t retaddr)
{
    if (!cpu->ignore_memory_transaction_failures
        && cpu->cc->tcg_ops->do_transaction_failed) {
        hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);

        cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
                                                access_type, mmu_idx,
                                                full->attrs, response, retaddr);
    }
}
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb. */
static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
                           MMUAccessType access_type, vaddr page)
{
    size_t vidx;

    assert_cpu_is_self(cpu);
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
        uint64_t cmp = tlb_read_idx(vtlb, access_type);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb. */
            CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];

            qemu_spin_lock(&cpu->neg.tlb.c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&cpu->neg.tlb.c.lock);

            CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
            CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
            CPUTLBEntryFull tmpf;
            tmpf = *f1; *f1 = *f2; *f2 = tmpf;
            return true;
        }
    }
    return false;
}

static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUTLBEntryFull *full, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + full->xlat_section;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}
static int probe_access_internal(CPUState *cpu, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, CPUTLBEntryFull **pfull,
                                 uintptr_t retaddr, bool check_mem_cbs)
{
    uintptr_t index = tlb_index(cpu, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
    uint64_t tlb_addr = tlb_read_idx(entry, access_type);
    vaddr page_addr = addr & TARGET_PAGE_MASK;
    int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
    bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
    CPUTLBEntryFull *full;

    if (!tlb_hit_page(tlb_addr, page_addr)) {
        if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
            if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
                                0, fault_size, nonfault, retaddr)) {
                /* Non-faulting page table read failed. */
                *phost = NULL;
                *pfull = NULL;
                return TLB_INVALID_MASK;
            }

            /* TLB resize via tlb_fill_align may have moved the entry. */
            index = tlb_index(cpu, mmu_idx, addr);
            entry = tlb_entry(cpu, mmu_idx, addr);

            /*
             * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
             * to force the next access through tlb_fill_align. We've just
             * called tlb_fill_align, so we know that this entry *is* valid.
             */
            flags &= ~TLB_INVALID_MASK;
        }
        tlb_addr = tlb_read_idx(entry, access_type);
    }
    flags &= tlb_addr;

    *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
    flags |= full->slow_flags[access_type];

    /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
        || (access_type != MMU_INST_FETCH && force_mmio)) {
        *phost = NULL;
        return TLB_MMIO;
    }

    /* Everything else is RAM. */
    *phost = (void *)((uintptr_t)addr + entry->addend);
    return flags;
}
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost, CPUTLBEntryFull **pfull,
                      uintptr_t retaddr)
{
    int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
                                      mmu_idx, nonfault, phost, pfull, retaddr,
                                      true);

    /* Handle clean RAM pages. */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        int dirtysize = size == 0 ? 1 : size;
        notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull)
{
    void *discard_phost;
    CPUTLBEntryFull *discard_tlb;

    /* privately handle users that don't need full results */
    phost = phost ? phost : &discard_phost;
    pfull = pfull ? pfull : &discard_tlb;

    int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
                                      mmu_idx, true, phost, pfull, 0, false);

    /* Handle clean RAM pages. */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        int dirtysize = size == 0 ? 1 : size;
        notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr)
{
    CPUTLBEntryFull *full;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env_cpu(env), addr, size, access_type,
                                  mmu_idx, nonfault, phost, &full, retaddr,
                                  true);

    /* Handle clean RAM pages. */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        int dirtysize = size == 0 ? 1 : size;
        notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env_cpu(env), addr, size, access_type,
                                  mmu_idx, false, &host, &full, retaddr,
                                  true);

    /* Per the interface, size == 0 merely faults the access. */
    if (size == 0) {
        return NULL;
    }

    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
        /* Handle watchpoints. */
        if (flags & TLB_WATCHPOINT) {
            int wp_access = (access_type == MMU_DATA_STORE
                             ? BP_MEM_WRITE : BP_MEM_READ);
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages. */
        if (flags & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, full, retaddr);
        }
    }

    return host;
}

void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags;

    flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
                                  mmu_idx, true, &host, &full, 0, false);

    /* No combination of flags are expected by the caller. */
    return flags ? NULL : host;
}
/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM. This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp)
{
    CPUTLBEntryFull *full;
    void *p;

    (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
                                cpu_mmu_index(env_cpu(env), true), false,
                                &p, &full, 0, false);
    if (p == NULL) {
        return -1;
    }

    if (full->lg_page_size < TARGET_PAGE_BITS) {
        return -1;
    }

    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

/* Load/store with atomicity primitives. */
#include "ldst_atomicity.c.inc"

#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * The one corner case is i/o write, which can cause changes to the
 * address space. Those changes, and the corresponding tlb flush,
 * should be delayed until the next TB, so even then this ought not fail.
 * But check, Just in Case.
 */
bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
    uintptr_t index = tlb_index(cpu, mmu_idx, addr);
    MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
    CPUTLBEntryFull *full;

    if (unlikely(!tlb_hit(tlb_addr, addr))) {
        return false;
    }

    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
    data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);

    /* We must have an iotlb entry for MMIO */
    if (tlb_addr & TLB_MMIO) {
        MemoryRegionSection *section =
            iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
                             full->attrs);
        data->is_io = true;
        data->mr = section->mr;
    } else {
        data->is_io = false;
        data->mr = NULL;
    }
    return true;
}
#endif
/*
 * Probe for a load/store operation.
 * Return the host address and into @flags.
 */

typedef struct MMULookupPageData {
    CPUTLBEntryFull *full;
    void *haddr;
    vaddr addr;
    int flags;
    int size;
} MMULookupPageData;

typedef struct MMULookupLocals {
    MMULookupPageData page[2];
    MemOp memop;
    int mmu_idx;
} MMULookupLocals;

/**
 * mmu_lookup1: translate one page
 * @cpu: generic cpu state
 * @data: lookup parameters
 * @memop: memory operation for the access, or 0
 * @mmu_idx: virtual address context
 * @access_type: load/store/code
 * @ra: return address into tcg generated code, or 0
 *
 * Resolve the translation for the one page at @data.addr, filling in
 * the rest of @data with the results. If the translation fails,
 * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
 * @mmu_idx may have resized.
 */
static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
                        int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
    vaddr addr = data->addr;
    uintptr_t index = tlb_index(cpu, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
    uint64_t tlb_addr = tlb_read_idx(entry, access_type);
    bool maybe_resized = false;
    CPUTLBEntryFull *full;
    int flags;

    /* If the TLB entry is for a different page, reload and try again. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill_align(cpu, addr, access_type, mmu_idx,
                           memop, data->size, false, ra);
            maybe_resized = true;
            index = tlb_index(cpu, mmu_idx, addr);
            entry = tlb_entry(cpu, mmu_idx, addr);
        }
        tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
    }

    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
    flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
    flags |= full->slow_flags[access_type];

    if (likely(!maybe_resized)) {
        /* Alignment has not been checked by tlb_fill_align. */
        int a_bits = memop_alignment_bits(memop);

        /*
         * This alignment check differs from the one above, in that this is
         * based on the atomicity of the operation. The intended use case is
         * the ARM memory type field of each PTE, where access to pages with
         * Device memory type require alignment.
         */
        if (unlikely(flags & TLB_CHECK_ALIGNED)) {
            int at_bits = memop_atomicity_bits(memop);
            a_bits = MAX(a_bits, at_bits);
        }
        if (unlikely(addr & ((1 << a_bits) - 1))) {
            cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
        }
    }

    data->full = full;
    data->flags = flags;
    /* Compute haddr speculatively; depending on flags it might be invalid. */
    data->haddr = (void *)((uintptr_t)addr + entry->addend);

    return maybe_resized;
}
/**
 * mmu_watch_or_dirty
 * @cpu: generic cpu state
 * @data: lookup parameters
 * @access_type: load/store/code
 * @ra: return address into tcg generated code, or 0
 *
 * Trigger watchpoints for @data.addr:@data.size;
 * record writes to protected clean pages.
 */
static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
                               MMUAccessType access_type, uintptr_t ra)
{
    CPUTLBEntryFull *full = data->full;
    vaddr addr = data->addr;
    int flags = data->flags;
    int size = data->size;

    /* On watchpoint hit, this will longjmp out. */
    if (flags & TLB_WATCHPOINT) {
        int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
        cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
        flags &= ~TLB_WATCHPOINT;
    }

    /* Note that notdirty is only set for writes. */
    if (flags & TLB_NOTDIRTY) {
        notdirty_write(cpu, addr, size, full, ra);
        flags &= ~TLB_NOTDIRTY;
    }
    data->flags = flags;
}

/**
 * mmu_lookup: translate page(s)
 * @cpu: generic cpu state
 * @addr: virtual address
 * @oi: combined mmu_idx and MemOp
 * @ra: return address into tcg generated code, or 0
 * @access_type: load/store/code
 * @l: output result
 *
 * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
 * bytes. Return true if the lookup crosses a page boundary.
 */
static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                       uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
    bool crosspage;
    int flags;

    l->memop = get_memop(oi);
    l->mmu_idx = get_mmuidx(oi);

    tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);

    l->page[0].addr = addr;
    l->page[0].size = memop_size(l->memop);
    l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
    l->page[1].size = 0;
    crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;

    if (likely(!crosspage)) {
        mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);

        flags = l->page[0].flags;
        if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
            mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
        }
        if (unlikely(flags & TLB_BSWAP)) {
            l->memop ^= MO_BSWAP;
        }
    } else {
        /* Finish compute of page crossing. */
        int size0 = l->page[1].addr - addr;
        l->page[1].size = l->page[0].size - size0;
        l->page[0].size = size0;

        /*
         * Lookup both pages, recognizing exceptions from either. If the
         * second lookup potentially resized, refresh first CPUTLBEntryFull.
         */
        mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
        if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
            uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
            l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
        }

        flags = l->page[0].flags | l->page[1].flags;
        if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
            mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
            mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
        }

        /*
         * Since target/sparc is the only user of TLB_BSWAP, and all
         * Sparc accesses are aligned, any treatment across two pages
         * would be arbitrary. Refuse it until there's a use.
         */
        tcg_debug_assert((flags & TLB_BSWAP) == 0);
    }

    return crosspage;
}
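/*
 * Worked example (illustrative numbers, not from the source): with a 4 KiB
 * page and an 8-byte access at addr 0x1ffd, page[1].addr == (0x1ffd + 7) &
 * ~0xfff == 0x2000, so crosspage is nonzero; size0 == 0x2000 - 0x1ffd == 3,
 * leaving page[0].size == 3 and page[1].size == 5.
 */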
/*
 * Probe for an atomic operation. Do not allow unaligned operations,
 * or io operations to proceed. Return the host address.
 */
static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                               int size, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    MemOp mop = get_memop(oi);
    uintptr_t index;
    CPUTLBEntry *tlbe;
    vaddr tlb_addr;
    void *hostaddr;
    CPUTLBEntryFull *full;
    bool did_tlb_fill = false;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    index = tlb_index(cpu, mmu_idx, addr);
    tlbe = tlb_entry(cpu, mmu_idx, addr);

    /* Check TLB entry and enforce page permissions. */
    tlb_addr = tlb_addr_write(tlbe);
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
                           mop, size, false, retaddr);
            did_tlb_fill = true;
            index = tlb_index(cpu, mmu_idx, addr);
            tlbe = tlb_entry(cpu, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /*
     * Let the guest notice RMW on a write-only page.
     * We have just verified that the page is writable.
     * Subpage lookups may have left TLB_INVALID_MASK set,
     * but addr_read will only be -1 if PAGE_READ was unset.
     */
    if (unlikely(tlbe->addr_read == -1)) {
        tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
                       0, size, false, retaddr);
        /*
         * Since we don't support reads and writes to different
         * addresses, and we do have the proper page loaded for
         * write, this shouldn't ever return.
         */
        g_assert_not_reached();
    }

    /* Enforce guest required alignment, if not handled by tlb_fill_align. */
    if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) {
        cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        /*
         * We get here if guest alignment was not requested, or was not
         * enforced by cpu_unaligned_access or tlb_fill_align above.
         * We might widen the access and emulate, but for now
         * mark an exception and exit the cpu loop.
         */
        goto stop_the_world;
    }

    /* Collect tlb flags for read. */
    tlb_addr |= tlbe->addr_read;

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world. */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(cpu, addr, size, full, retaddr);
    }

    if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
        int wp_flags = 0;

        if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
            wp_flags |= BP_MEM_WRITE;
        }
        if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
            wp_flags |= BP_MEM_READ;
        }
        if (wp_flags) {
            cpu_check_watchpoint(cpu, addr, size,
                                 full->attrs, wp_flags, retaddr);
        }
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(cpu, retaddr);
}
1905 * We support two different access types. SOFTMMU_CODE_ACCESS is
1906 * specifically for reading instructions from system memory. It is
1907 * called by the translation loop and in some helpers where the code
1908 * is disassembled. It shouldn't be called directly by guest code.
1910 * For the benefit of TCG generated code, we want to avoid the
1911 * complication of ABI-specific return type promotion and always
1912 * return a value extended to the register size of the host. This is
1913 * tcg_target_long, except in the case of a 32-bit host and 64-bit
1914 * data, and for that we always have uint64_t.
1916 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
/**
 * @cpu: generic cpu state
 * @full: page parameters
 * @ret_be: accumulated data
 * @addr: virtual address
 * @size: number of bytes
 * @mmu_idx: virtual address context
 * @ra: return address into tcg generated code, or 0
 *
 * Load @size bytes from @addr, which is memory-mapped i/o.
 * The bytes are concatenated in big-endian order with @ret_be.
 */
static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
                                uint64_t ret_be, vaddr addr, int size,
                                int mmu_idx, MMUAccessType type, uintptr_t ra,
                                MemoryRegion *mr, hwaddr mr_offset)
{
    do {
        MemOp this_mop;
        unsigned this_size;
        uint64_t val;
        MemTxResult r;

        /* Read aligned pieces up to 8 bytes. */
        this_mop = ctz32(size | (int)addr | 8);
        this_size = 1 << this_mop;
        this_mop |= MO_BE;

        r = memory_region_dispatch_read(mr, mr_offset, &val,
                                        this_mop, full->attrs);
        if (unlikely(r != MEMTX_OK)) {
            io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
        }
        if (this_size == 8) {
            return val;
        }

        ret_be = (ret_be << (this_size * 8)) | val;
        addr += this_size;
        mr_offset += this_size;
        size -= this_size;
    } while (size);

    return ret_be;
}
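
/*
 * Worked example (illustrative, values invented): a 6-byte MMIO read at an
 * address whose low three bits are 2 splits into a 2-byte piece followed by
 * a 4-byte piece, since ctz32(6 | 2 | 8) = 1 and then ctz32(4 | 4 | 8) = 2.
 * If the pieces read 0x1122 and 0x33445566, the accumulator becomes
 * ((ret_be << 16) | 0x1122) and then ((... << 32) | 0x33445566), i.e. the
 * bytes end up in big-endian order.
 */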
static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
                               uint64_t ret_be, vaddr addr, int size,
                               int mmu_idx, MMUAccessType type, uintptr_t ra)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr mr_offset;
    MemTxAttrs attrs;

    tcg_debug_assert(size > 0 && size <= 8);

    attrs = full->attrs;
    section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
    mr = section->mr;

    return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
                           type, ra, mr, mr_offset);
}
static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
                               uint64_t ret_be, vaddr addr, int size,
                               int mmu_idx, uintptr_t ra)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr mr_offset;
    MemTxAttrs attrs;
    uint64_t a, b;

    tcg_debug_assert(size > 8 && size <= 16);

    attrs = full->attrs;
    section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
    mr = section->mr;

    a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
                        MMU_DATA_LOAD, ra, mr, mr_offset);
    b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
                        MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
    return int128_make128(b, a);
}
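
/*
 * Note (illustrative): int128_make128() takes (lo, hi), so for a big-endian
 * MMIO read the bytes at the lower addresses ('a') land in the high half and
 * the final 8 bytes ('b') in the low half.  A 12-byte read, for example,
 * would put 4 bytes into 'a' and 8 bytes into 'b'.
 */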
/**
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * Load @p->size bytes from @p->haddr, which is RAM.
 * The bytes are concatenated in big-endian order with @ret_be.
 */
static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
{
    uint8_t *haddr = p->haddr;
    int i, size = p->size;

    for (i = 0; i < size; i++) {
        ret_be = (ret_be << 8) | haddr[i];
    }
    return ret_be;
}
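
/*
 * Worked example (illustrative): with haddr[] = { 0x12, 0x34, 0x56 } and
 * size == 3, starting from ret_be == 0 the loop produces 0x12, 0x1234 and
 * finally 0x123456; the lowest address ends up in the most significant
 * byte, as expected for a big-endian accumulation.
 */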
/**
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but atomically on each aligned part.
 */
static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
{
    void *haddr = p->haddr;
    int size = p->size;

    do {
        uint64_t x;
        int n;

        /*
         * Find minimum of alignment and size.
         * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
         * would have only checked the low bits of addr|size once at the start,
         * but is just as easy.
         */
        switch (((uintptr_t)haddr | size) & 7) {
        case 4:
            x = cpu_to_be32(load_atomic4(haddr));
            ret_be = (ret_be << 32) | x;
            n = 4;
            break;
        case 2:
        case 6:
            x = cpu_to_be16(load_atomic2(haddr));
            ret_be = (ret_be << 16) | x;
            n = 2;
            break;
        default:
            x = *(uint8_t *)haddr;
            ret_be = (ret_be << 8) | x;
            n = 1;
            break;
        case 0:
            g_assert_not_reached();
        }
        haddr += n;
        size -= n;
    } while (size != 0);
    return ret_be;
}
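
/*
 * Worked example (illustrative): a haddr ending in ...2 with size == 6
 * gives (2 | 6) & 7 == 6, so a 2-byte atomic piece is loaded first; the
 * pointer then ends in ...4 with size == 4, giving (4 | 4) & 7 == 4 and one
 * final 4-byte atomic piece.  Each aligned subobject is thus read with a
 * single atomic load, which is what MO_ATOM_SUBALIGN asks for.
 */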
/**
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but with one atomic load.
 * Four aligned bytes are guaranteed to cover the load.
 */
static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
{
    int o = p->addr & 3;
    uint32_t x = load_atomic4(p->haddr - o);

    x = cpu_to_be32(x);
    x <<= o * 8;
    x >>= (4 - p->size) * 8;
    return (ret_be << (p->size * 8)) | x;
}
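
/*
 * Worked example (illustrative): for p->addr & 3 == 1 and p->size == 3, the
 * aligned 4-byte load also fetches the byte just before p->addr.  After the
 * conversion to big-endian order, shifting left by o * 8 = 8 drops that
 * extra leading byte, and shifting right by (4 - 3) * 8 = 8 leaves the three
 * wanted bytes right-justified before they are merged into ret_be.
 */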
/**
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but with one atomic load.
 * Eight aligned bytes are guaranteed to cover the load.
 */
static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
                                MMULookupPageData *p, uint64_t ret_be)
{
    int o = p->addr & 7;
    uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);

    x = cpu_to_be64(x);
    x <<= o * 8;
    x >>= (8 - p->size) * 8;
    return (ret_be << (p->size * 8)) | x;
}
/**
 * @p: translation parameters
 * @ret_be: accumulated data
 *
 * As do_ld_bytes_beN, but with one atomic load.
 * 16 aligned bytes are guaranteed to cover the load.
 */
static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
                               MMULookupPageData *p, uint64_t ret_be)
{
    int o = p->addr & 15;
    Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
    int size = p->size;

    if (!HOST_BIG_ENDIAN) {
        y = bswap128(y);
    }
    y = int128_lshift(y, o * 8);
    y = int128_urshift(y, (16 - size) * 8);
    x = int128_make64(ret_be);
    x = int128_lshift(x, size * 8);
    return int128_or(x, y);
}
/*
 * Wrapper for the above.
 */
static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
                          uint64_t ret_be, int mmu_idx, MMUAccessType type,
                          MemOp mop, uintptr_t ra)
{
    MemOp atom;
    unsigned tmp, half_size;

    if (unlikely(p->flags & TLB_MMIO)) {
        return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
                              mmu_idx, type, ra);
    }

    /*
     * It is a given that we cross a page and therefore there is no
     * atomicity for the load as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        return do_ld_parts_beN(p, ret_be);

    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        tmp = mop & MO_SIZE;
        tmp = tmp ? tmp - 1 : 0;
        half_size = 1 << tmp;
        if (atom == MO_ATOM_IFALIGN_PAIR
            ? p->size == half_size
            : p->size >= half_size) {
            if (!HAVE_al8_fast && p->size < 4) {
                return do_ld_whole_be4(p, ret_be);
            } else {
                return do_ld_whole_be8(cpu, ra, p, ret_be);
            }
        }
        /* fall through */

    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        return do_ld_bytes_beN(p, ret_be);

    default:
        g_assert_not_reached();
    }
}
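
/*
 * Worked example (illustrative): a 4-byte MO_ATOM_IFALIGN_PAIR load that
 * crosses a page with 2 bytes on each side has (mop & MO_SIZE) == MO_32, so
 * half_size == 1 << (2 - 1) == 2 and p->size == half_size; each half is
 * then loaded with a single atomic operation rather than byte by byte.
 */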
/*
 * Wrapper for the above, for 8 < size < 16.
 */
static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
                          uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
{
    int size = p->size;
    uint64_t b;
    MemOp atom;

    if (unlikely(p->flags & TLB_MMIO)) {
        return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
    }

    /*
     * It is a given that we cross a page and therefore there is no
     * atomicity for the load as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        p->size = size - 8;
        a = do_ld_parts_beN(p, a);
        p->haddr += size - 8;
        p->size = 8;
        b = do_ld_parts_beN(p, 0);
        break;

    case MO_ATOM_WITHIN16_PAIR:
        /* Since size > 8, this is the half that must be atomic. */
        return do_ld_whole_be16(cpu, ra, p, a);

    case MO_ATOM_IFALIGN_PAIR:
        /*
         * Since size > 8, both halves are misaligned,
         * and so neither is atomic.
         */
    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        p->size = size - 8;
        a = do_ld_bytes_beN(p, a);
        b = ldq_be_p(p->haddr + size - 8);
        break;

    default:
        g_assert_not_reached();
    }

    return int128_make128(b, a);
}
static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
                       MMUAccessType type, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
    } else {
        return *(uint8_t *)p->haddr;
    }
}

static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
                        MMUAccessType type, MemOp memop, uintptr_t ra)
{
    uint16_t ret;

    if (unlikely(p->flags & TLB_MMIO)) {
        ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
        if ((memop & MO_BSWAP) == MO_LE) {
            ret = bswap16(ret);
        }
    } else {
        /* Perform the load host endian, then swap if necessary. */
        ret = load_atom_2(cpu, ra, p->haddr, memop);
        if (memop & MO_BSWAP) {
            ret = bswap16(ret);
        }
    }
    return ret;
}

static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
                        MMUAccessType type, MemOp memop, uintptr_t ra)
{
    uint32_t ret;

    if (unlikely(p->flags & TLB_MMIO)) {
        ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
        if ((memop & MO_BSWAP) == MO_LE) {
            ret = bswap32(ret);
        }
    } else {
        /* Perform the load host endian. */
        ret = load_atom_4(cpu, ra, p->haddr, memop);
        if (memop & MO_BSWAP) {
            ret = bswap32(ret);
        }
    }
    return ret;
}

static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
                        MMUAccessType type, MemOp memop, uintptr_t ra)
{
    uint64_t ret;

    if (unlikely(p->flags & TLB_MMIO)) {
        ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
        if ((memop & MO_BSWAP) == MO_LE) {
            ret = bswap64(ret);
        }
    } else {
        /* Perform the load host endian. */
        ret = load_atom_8(cpu, ra, p->haddr, memop);
        if (memop & MO_BSWAP) {
            ret = bswap64(ret);
        }
    }
    return ret;
}
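
/*
 * Endianness note with a small example (illustrative): the MMIO path above
 * always accumulates bytes big-endian, so a little-endian guest load of the
 * byte sequence 0x11 0x22 0x33 0x44 first yields 0x11223344 and is then
 * byte-swapped to the 0x44332211 the guest expects.  The RAM path instead
 * loads host-endian and swaps only when MO_BSWAP is set.
 */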
static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                          uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
    tcg_debug_assert(!crosspage);

    return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
}
static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;
    uint16_t ret;
    uint8_t a, b;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
    if (likely(!crosspage)) {
        return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
    }

    a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
    b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);

    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = a | (b << 8);
    } else {
        ret = b | (a << 8);
    }
    return ret;
}
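
/*
 * Cross-page example (illustrative): a 2-byte load whose first byte is the
 * last byte of a page reads one byte from each page.  For a little-endian
 * memop the byte from page 0 becomes the low half (a | b << 8); for a
 * big-endian memop the order is reversed (b | a << 8).
 */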
static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;
    uint32_t ret;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
    if (likely(!crosspage)) {
        return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
    }

    ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
    ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = bswap32(ret);
    }
    return ret;
}
static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    MMULookupLocals l;
    bool crosspage;
    uint64_t ret;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
    if (likely(!crosspage)) {
        return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
    }

    ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
    ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = bswap64(ret);
    }
    return ret;
}
static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;
    uint64_t a, b;
    Int128 ret;
    int first;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
    if (likely(!crosspage)) {
        if (unlikely(l.page[0].flags & TLB_MMIO)) {
            ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
                                   l.mmu_idx, ra);
            if ((l.memop & MO_BSWAP) == MO_LE) {
                ret = bswap128(ret);
            }
        } else {
            /* Perform the load host endian. */
            ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
            if (l.memop & MO_BSWAP) {
                ret = bswap128(ret);
            }
        }
        return ret;
    }

    first = l.page[0].size;
    if (first == 8) {
        MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;

        a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
        b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
        if ((mop8 & MO_BSWAP) == MO_LE) {
            ret = int128_make128(a, b);
        } else {
            ret = int128_make128(b, a);
        }
        return ret;
    }

    if (first < 8) {
        a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
                      MMU_DATA_LOAD, l.memop, ra);
        ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
    } else {
        ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
        b = int128_getlo(ret);
        ret = int128_lshift(ret, l.page[1].size * 8);
        a = int128_gethi(ret);
        b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
                      MMU_DATA_LOAD, l.memop, ra);
        ret = int128_make128(b, a);
    }
    if ((l.memop & MO_BSWAP) == MO_LE) {
        ret = bswap128(ret);
    }
    return ret;
}
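
/*
 * Note on mop8 above (illustrative): (l.memop & ~MO_SIZE) | MO_64 keeps the
 * original atomicity and byte-swap bits but treats each half as an aligned
 * 8-byte access, which is why the two do_ld_8() results can be recombined
 * with a plain int128_make128() in either byte order.
 */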
/**
 * @cpu: generic cpu state
 * @full: page parameters
 * @val_le: data to store
 * @addr: virtual address
 * @size: number of bytes
 * @mmu_idx: virtual address context
 * @ra: return address into tcg generated code, or 0
 *
 * Store @size bytes at @addr, which is memory-mapped i/o.
 * The bytes to store are extracted in little-endian order from @val_le;
 * return the bytes of @val_le beyond @size that have not been stored.
 */
static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
                                uint64_t val_le, vaddr addr, int size,
                                int mmu_idx, uintptr_t ra,
                                MemoryRegion *mr, hwaddr mr_offset)
{
    do {
        MemOp this_mop;
        unsigned this_size;
        MemTxResult r;

        /* Store aligned pieces up to 8 bytes. */
        this_mop = ctz32(size | (int)addr | 8);
        this_size = 1 << this_mop;
        this_mop |= MO_LE;

        r = memory_region_dispatch_write(mr, mr_offset, val_le,
                                         this_mop, full->attrs);
        if (unlikely(r != MEMTX_OK)) {
            io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
                      mmu_idx, r, ra);
        }
        if (this_size == 8) {
            return 0;
        }

        val_le >>= this_size * 8;
        addr += this_size;
        mr_offset += this_size;
        size -= this_size;
    } while (size);

    return val_le;
}
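
/*
 * Worked example (illustrative): a 6-byte store at an address whose low
 * three bits are 2 issues a 2-byte write of (val_le & 0xffff), shifts
 * val_le right by 16, and then issues a 4-byte write of the remaining
 * bytes.  The bytes of @val_le are consumed strictly from the low end, so
 * whatever has not yet been written is what the function returns.
 */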
static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
                               uint64_t val_le, vaddr addr, int size,
                               int mmu_idx, uintptr_t ra)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr mr_offset;
    MemTxAttrs attrs;

    tcg_debug_assert(size > 0 && size <= 8);

    attrs = full->attrs;
    section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
    mr = section->mr;

    return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
                           ra, mr, mr_offset);
}
static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
                                 Int128 val_le, vaddr addr, int size,
                                 int mmu_idx, uintptr_t ra)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr mr_offset;
    MemTxAttrs attrs;

    tcg_debug_assert(size > 8 && size <= 16);

    attrs = full->attrs;
    section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
    mr = section->mr;

    int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
                    mmu_idx, ra, mr, mr_offset);
    return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
                           size - 8, mmu_idx, ra, mr, mr_offset + 8);
}
/*
 * Wrapper for the above.
 */
static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
                          uint64_t val_le, int mmu_idx,
                          MemOp mop, uintptr_t ra)
{
    MemOp atom;
    unsigned tmp, half_size;

    if (unlikely(p->flags & TLB_MMIO)) {
        return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
                              p->size, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        return val_le >> (p->size * 8);
    }

    /*
     * It is a given that we cross a page and therefore there is no atomicity
     * for the store as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        return store_parts_leN(p->haddr, p->size, val_le);

    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        tmp = mop & MO_SIZE;
        tmp = tmp ? tmp - 1 : 0;
        half_size = 1 << tmp;
        if (atom == MO_ATOM_IFALIGN_PAIR
            ? p->size == half_size
            : p->size >= half_size) {
            if (!HAVE_al8_fast && p->size <= 4) {
                return store_whole_le4(p->haddr, p->size, val_le);
            } else if (HAVE_al8) {
                return store_whole_le8(p->haddr, p->size, val_le);
            } else {
                cpu_loop_exit_atomic(cpu, ra);
            }
        }
        /* fall through */

    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        return store_bytes_leN(p->haddr, p->size, val_le);

    default:
        g_assert_not_reached();
    }
}
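
/*
 * Worked example (illustrative): a store that hits TLB_DISCARD_WRITE with
 * p->size == 3 drops the data and returns val_le >> 24, so the caller can
 * still forward the unwritten upper bytes to the second page of a
 * cross-page store.
 */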
/*
 * Wrapper for the above, for 8 < size < 16.
 */
static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
                            Int128 val_le, int mmu_idx,
                            MemOp mop, uintptr_t ra)
{
    int size = p->size;
    MemOp atom;

    if (unlikely(p->flags & TLB_MMIO)) {
        return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
                                size, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        return int128_gethi(val_le) >> ((size - 8) * 8);
    }

    /*
     * It is a given that we cross a page and therefore there is no atomicity
     * for the store as a whole, but subobjects may need attention.
     */
    atom = mop & MO_ATOM_MASK;
    switch (atom) {
    case MO_ATOM_SUBALIGN:
        store_parts_leN(p->haddr, 8, int128_getlo(val_le));
        return store_parts_leN(p->haddr + 8, p->size - 8,
                               int128_gethi(val_le));

    case MO_ATOM_WITHIN16_PAIR:
        /* Since size > 8, this is the half that must be atomic. */
        if (!HAVE_CMPXCHG128) {
            cpu_loop_exit_atomic(cpu, ra);
        }
        return store_whole_le16(p->haddr, p->size, val_le);

    case MO_ATOM_IFALIGN_PAIR:
        /*
         * Since size > 8, both halves are misaligned,
         * and so neither is atomic.
         */
    case MO_ATOM_IFALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_NONE:
        stq_le_p(p->haddr, int128_getlo(val_le));
        return store_bytes_leN(p->haddr + 8, p->size - 8,
                               int128_gethi(val_le));

    default:
        g_assert_not_reached();
    }
}
static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
                    int mmu_idx, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        *(uint8_t *)p->haddr = val;
    }
}

static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
                    int mmu_idx, MemOp memop, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        if ((memop & MO_BSWAP) != MO_LE) {
            val = bswap16(val);
        }
        do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        /* Swap to host endian if necessary, then store. */
        if (memop & MO_BSWAP) {
            val = bswap16(val);
        }
        store_atom_2(cpu, ra, p->haddr, memop, val);
    }
}

static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
                    int mmu_idx, MemOp memop, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        if ((memop & MO_BSWAP) != MO_LE) {
            val = bswap32(val);
        }
        do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        /* Swap to host endian if necessary, then store. */
        if (memop & MO_BSWAP) {
            val = bswap32(val);
        }
        store_atom_4(cpu, ra, p->haddr, memop, val);
    }
}

static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
                    int mmu_idx, MemOp memop, uintptr_t ra)
{
    if (unlikely(p->flags & TLB_MMIO)) {
        if ((memop & MO_BSWAP) != MO_LE) {
            val = bswap64(val);
        }
        do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
    } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
        /* nothing */
    } else {
        /* Swap to host endian if necessary, then store. */
        if (memop & MO_BSWAP) {
            val = bswap64(val);
        }
        store_atom_8(cpu, ra, p->haddr, memop, val);
    }
}
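
/*
 * Endianness example for the MMIO store path (illustrative): a big-endian
 * guest storing 0x1234 has (memop & MO_BSWAP) != MO_LE, so val is swapped
 * to 0x3412 before do_st_mmio_leN() emits its bytes low-end first; the
 * device thus sees 0x12 then 0x34, matching guest memory order.
 */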
static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
    tcg_debug_assert(!crosspage);

    do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
}

static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;
    uint8_t a, b;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
        return;
    }

    if ((l.memop & MO_BSWAP) == MO_LE) {
        a = val, b = val >> 8;
    } else {
        b = val, a = val >> 8;
    }
    do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
    do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
}
static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
        return;
    }

    /* Swap to little endian for simplicity, then store by bytes. */
    if ((l.memop & MO_BSWAP) != MO_LE) {
        val = bswap32(val);
    }
    val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
    (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
}

static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
        return;
    }

    /* Swap to little endian for simplicity, then store by bytes. */
    if ((l.memop & MO_BSWAP) != MO_LE) {
        val = bswap64(val);
    }
    val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
    (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
                        MemOpIdx oi, uintptr_t ra)
{
    MMULookupLocals l;
    bool crosspage;
    uint64_t a, b;
    int first;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
    if (likely(!crosspage)) {
        if (unlikely(l.page[0].flags & TLB_MMIO)) {
            if ((l.memop & MO_BSWAP) != MO_LE) {
                val = bswap128(val);
            }
            do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
        } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
            /* nothing */
        } else {
            /* Swap to host endian if necessary, then store. */
            if (l.memop & MO_BSWAP) {
                val = bswap128(val);
            }
            store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
        }
        return;
    }

    first = l.page[0].size;
    if (first == 8) {
        MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;

        if (l.memop & MO_BSWAP) {
            val = bswap128(val);
        }
        if (HOST_BIG_ENDIAN) {
            b = int128_getlo(val), a = int128_gethi(val);
        } else {
            a = int128_getlo(val), b = int128_gethi(val);
        }
        do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
        do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
        return;
    }

    if ((l.memop & MO_BSWAP) != MO_LE) {
        val = bswap128(val);
    }
    if (first < 8) {
        do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
        val = int128_urshift(val, first * 8);
        do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
    } else {
        b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
        do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
    }
}
#include "ldst_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
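
/*
 * Illustration of the naming produced by ATOMIC_NAME (a sketch; SUFFIX and
 * END are supplied by atomic_template.h): with DATA_SIZE 4 the template is
 * expected to emit helpers along the lines of cpu_atomic_cmpxchgl_le_mmu
 * and cpu_atomic_cmpxchgl_be_mmu, i.e. cpu_atomic_<op><size-suffix><end>_mmu.
 */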
/* Code access functions.  */

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    CPUState *cs = env_cpu(env);
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
    return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    CPUState *cs = env_cpu(env);
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
    return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    CPUState *cs = env_cpu(env);
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
    return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    CPUState *cs = env_cpu(env);
    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
    return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
}
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t retaddr)
{
    return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);