/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL
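
/*
 * Note on helper_retaddr: the load/store helpers below bracket their host
 * memory accesses with set_helper_retaddr()/clear_helper_retaddr().  A
 * SIGSEGV taken while the value is non-zero is unwound via that saved
 * host return address; adjust_signal_pc() below spells out the rules.
 */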

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc -= GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/*
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access.  If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
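
/*
 * Sketch of the expected call sequence (the actual host SIGSEGV handler
 * lives in the per-host signal code, not in this file): the handler uses
 * adjust_signal_pc() to pick the unwind pc, and for a write ACCERR fault
 * calls handle_sigsegv_accerr_write(); on a true return it simply returns
 * from the signal and lets the faulting access be retried.
 */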

typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;
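
/*
 * Guest page protections are tracked as [start, last] ranges in the
 * interval tree rooted at pageflags_root.  Readers may look nodes up
 * without the mmap lock; as noted below, such lockless lookups can see
 * false negatives but never false positives.
 */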

static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}

/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}

/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}
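
/*
 * The helpers above (pageflags_create, pageflags_unset,
 * pageflags_create_merge) and pageflags_set_clear below are building
 * blocks for page_set_flags(); they run with the mmap lock held and keep
 * the interval tree free of overlapping or needlessly split ranges.
 */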

/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);

                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}
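
/*
 * Illustrative only (the callers live in the target mmap path, not in this
 * file): a new anonymous, writable mapping would typically be registered as
 *
 *     page_set_flags(start, start + len - 1,
 *                    PAGE_VALID | PAGE_RESET | PAGE_ANON |
 *                    PAGE_READ | PAGE_WRITE);
 *
 * while unmapping the same range passes 0 (no PAGE_VALID), which discards
 * the flags, any target data, and any cached translations for the range.
 */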

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    int ret;

    if (len == 0) {
        return 0;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return -1; /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = -1; /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = -1; /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & PAGE_READ) {
            ret = -1; /* page not readable */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = -1; /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = -1;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = 0; /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = 0; /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}
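
/*
 * Illustrative only: callers use page_check_range() to validate whole
 * guest buffers before touching them, along the lines of
 *
 *     if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * A zero result means every page in [guest_addr, guest_addr + len) has at
 * least the requested permissions.
 */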

void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}

/*
 * Called from signal handler: invalidate the code and unprotect the
 * page.  Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited.  (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}
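
/*
 * Self-modifying code thus works as follows: translated pages lose
 * PAGE_WRITE via page_protect(), the guest's own store then faults, and
 * the SIGSEGV path above calls page_unprotect() to drop the affected TBs
 * and restore write access before the store is retried.
 */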

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free(t);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}

#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */
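
/*
 * TARGET_PAGE_DATA_SIZE is only defined by targets that attach per-page
 * metadata to guest memory (for example memory tags); when it is not
 * defined, page_reset_target_data() above collapses to a no-op.
 */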

/* The softmmu versions of these helpers are in cputlb.c. */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}
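
/*
 * A MemOpIdx packs a MemOp together with an mmu index; in user-only mode
 * the latter is ignored here and only the size/byte-swap bits are checked.
 * As a hypothetical example, a caller of cpu_ldl_be_mmu() below would
 * build its argument with something like make_memop_idx(MO_BEUL, 0).
 */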

void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}
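
/*
 * All of the load/store helpers below follow the same shape: validate the
 * MemOp, translate the guest address with cpu_mmu_lookup() (which also
 * arms helper_retaddr), perform the host access, clear helper_retaddr,
 * and finally notify any plugins of the completed access.
 */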

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    memcpy(&ret, haddr, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

    if (!HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    memcpy(&ret, haddr, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

    if (HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (!HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}
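
/*
 * Each inclusion of "atomic_template.h" below expands, for one operand
 * size, the cpu_atomic_*_mmu entry points named via ATOMIC_NAME(); they
 * use atomic_mmu_lookup() above and ATOMIC_MMU_CLEANUP to bracket the
 * host atomic operation.
 */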

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif