/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "mte_helper.h"

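/*
 * Pick a tag that is not excluded: starting from @tag, advance through
 * @offset tags that are not set in @exclude, wrapping modulo 16.  This
 * corresponds to the architectural ChooseNonExcludedTag() pseudocode.
 * For example, tag = 1, offset = 2, exclude = 0x000c (tags 2 and 3
 * excluded) visits 2, 3, 4, 5 and returns 5.
 */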
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

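/*
 * Tag storage is packed two tags per byte: in system emulation the tag
 * byte for physical address A lives at A >> (LOG2_TAG_GRANULE + 1) in the
 * tag address space, with the tag for the lower granule in bits [3:0] and
 * the tag for the upper granule in bits [7:4].  This function resolves
 * @ptr to a pointer into that storage, or returns NULL when the page has
 * no tag storage.
 */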
uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
                                  uint64_t ptr, MMUAccessType ptr_access,
                                  int ptr_size, MMUAccessType tag_access,
                                  bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    assert(!(probe && ra));

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        if (probe) {
            return NULL;
        }
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is either a pure probe or a no-fault-expected probe.
     * Indicate to probe_access_flags no-fault, then either return NULL
     * for the pure probe, or assert that we received a valid page for the
     * no-fault-expected probe.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    if (probe && (flags & TLB_INVALID_MASK)) {
        return NULL;
    }
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->extra.arm.pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

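/*
 * Convenience wrapper around allocation_tag_mem_probe() with probe=false:
 * an inaccessible page faults (unwinding via @ra) instead of returning
 * NULL, while NULL still means the page has no tag storage.
 */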
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   uintptr_t ra)
{
    return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
                                    ptr_size, tag_access, false, ra);
}

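/*
 * IRG: insert a random allocation tag into the address in Xn.  GCR_EL1
 * (merged with the exclude mask in Xm) supplies the excluded tags, and
 * RGSR_EL1 holds the SEED/TAG state for the deterministic RandomTag
 * algorithm below.
 */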
uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}

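/*
 * ADDG/SUBG: @offset is the signed byte offset to apply to the address
 * (SUBG passes a negative value), and @tag_offset is the number of
 * non-excluded tags by which to advance the existing allocation tag.
 */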
uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = arm_env_mmu_index(env);
    int rtag = 0;
    uint8_t *mem;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    arm_env_mmu_index(env), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = arm_env_mmu_index(env);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

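/*
 * The "stub" form performs the access checks for STG without writing a
 * tag: the pointer must still be TAG_GRANULE aligned and the location
 * writable, which probe_write() verifies.
 */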
void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = arm_env_mmu_index(env);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

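/*
 * LDGM/STGM transfer one tag per granule for a whole GMID_EL1.BS-sized
 * block between tag memory and a general-purpose register, packed
 * little-endian at 4 bits per tag.
 */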
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    uint64_t ret;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /* The tag is squashed to zero if the page does not support tags.  */
    if (!tag_mem) {
        return 0;
    }

    /*
     * The ordering of elements within the word corresponds to
     * a little-endian operation.  Computation of shift comes from
     *
     *     index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
     *     data<index*4+3:index*4> = tag
     *
     * Because of the alignment of ptr above, BS=6 has shift=0.
     * All memory operations are aligned.  Defer support for BS=2,
     * requiring insertion or extraction of a nibble, until we
     * support a cpu that requires it.
     */
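    /*
     * For example, with BS=4 (gm_bs_bytes = 64) the aligned ptr has
     * bits [5:4] clear, so shift = ptr<7:4> * 4 is 0, 16, 32 or 48:
     * the block's four tags land in the 16-bit lane of the result
     * selected by bits [7:6] of the address.
     */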
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        ret = *(uint8_t *)tag_mem;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        ret = cpu_to_le16(*(uint16_t *)tag_mem);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        ret = cpu_to_le32(*(uint32_t *)tag_mem);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        return cpu_to_le64(*(uint64_t *)tag_mem);
    default:
        /*
         * CPU configured with unsupported/invalid gm blocksize.
         * This is detected early in arm_cpu_realizefn.
         */
        g_assert_not_reached();
    }
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    return ret << shift;
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /*
     * Tag store only happens if the page support tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    /* See LDGM for comments on BS and on shift.  */
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    val >>= shift;
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        *(uint8_t *)tag_mem = val;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        *(uint16_t *)tag_mem = cpu_to_le16(val);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        *(uint32_t *)tag_mem = cpu_to_le32(val);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        *(uint64_t *)tag_mem = cpu_to_le64(val);
        break;
    default:
        /* cpu configured with unsupported gm blocksize. */
        g_assert_not_reached();
    }
}

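/*
 * STZGM (and DC GZVA) zero one DC ZVA block and set its allocation tags.
 * This helper writes only the tag bytes, replicating the tag from @val
 * into both nibbles of each byte it touches; zeroing of the data itself
 * is done separately by the caller.
 */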
void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = arm_env_mmu_index(env);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

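/*
 * Synchronous tag check faults are reported as a Data Abort with
 * DFSC 0b010001 (0x11), carrying the address that failed the check.
 */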
static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure.  */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/**
 * checkN:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}

/**
 * checkNrev:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * This is like checkN, but it runs backwards, checking the
 * tags starting with @tag and then the tags preceding it.
 * This is needed by the backwards-memory-copying operations.
 */
static int checkNrev(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem-- ^ cmp;

    if (!odd) {
        goto start_even;
    }

    while (1) {
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_even:
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem-- ^ cmp;
    }
    return n;
}

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint32_t sizem1, tag_count, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, ra);

        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    /*
     * R_XCHFJ: Alignment check not caused by memory type is priority 1,
     * higher than any translation fault.  When MTE is disabled, tcg
     * performs the alignment check during the code generated for the
     * memory access.  With MTE enabled, we must check this here before
     * raising any translation fault in allocation_tag_mem.
     */
    unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN);
    if (unlikely(align)) {
        align = (1u << align) - 1;
        if (unlikely(ptr & align)) {
            int idx = FIELD_EX32(desc, MTEDESC, MIDX);
            bool w = FIELD_EX32(desc, MTEDESC, WRITE);
            MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD;
            arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC());
        }
    }

    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}

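/*
 * For FEAT_MOPS: return how many bytes, starting at @ptr and moving
 * forwards, can be accessed before a tag mismatch would be reported.
 * When every granule matches, the full @size is returned.
 */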
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe; this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   size, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkN() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *      do direct loads of 64 tag bits at a time;
     *   } else {
     *      call checkN()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr.  Otherwise
     * it's at the first byte of the nth granule.  Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return n * TAG_GRANULE - (ptr - tag_first);
    }
}

uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /*
     * True probe; this will never fault.  Note that our caller passes
     * us a pointer to the end of the region, but allocation_tag_mem_probe()
     * wants a pointer to the start.  Because we know we don't span a page
     * boundary and that allocation_tag_mem_probe() doesn't otherwise care
     * about the size, pass in a size of 1 byte.  This is simpler than
     * adjusting the ptr to point to the start of the region and then having
     * to adjust the returned 'mem' to get the end of the tag memory.
     */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   1, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkNrev() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *      do direct loads of 64 tag bits at a time;
     *   } else {
     *      call checkNrev()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr.  Otherwise
     * it's at the last byte of the nth granule.  Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
    }
}

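/*
 * Used by the FEAT_MOPS SETG* sequences: store the allocation tag taken
 * from @ptr over the whole [ptr, ptr + size) range.  The caller guarantees
 * that @ptr and @size are both TAG_GRANULE aligned.
 */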
void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
                       uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag;
    uint8_t *mem;

    if (!desc) {
        /* Tags not actually enabled */
        return;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe: this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size,
                                   MMU_DATA_STORE, true, 0);
    if (!mem) {
        return;
    }

    /*
     * We know that ptr and size are both TAG_GRANULE aligned; store
     * the tag from the pointer value into the tag memory.
     */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_count = size / TAG_GRANULE;
    if (ptr & TAG_GRANULE) {
        /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */
        store_tag1_parallel(TAG_GRANULE, mem, ptr_tag);
        mem++;
        tag_count--;
    }
    memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2);
    if (tag_count & 1) {
        /* Final trailing unaligned nibble */
        mem += tag_count / 2;
        store_tag1_parallel(0, mem, ptr_tag);
    }
}
);