/*
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* for crc32 */
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "semihosting/common-semi.h"
#include "target/arm/gtimer.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
static void switch_mode(CPUARMState *env, int mode);

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /*
     * Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /*
     * Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /*
         * Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
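/*
 * Typical round trip (sketch): migration save refreshes cpu->cpreg_values[]
 * via write_cpustate_to_list(), the (index,value) pairs are transferred, and
 * the load side calls write_list_to_cpustate() to push them back, relying on
 * the read-back check above to reject values the destination cannot accept.
 */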
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /*
     * Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
static bool arm_pan_enabled(CPUARMState *env)
{
    if (is_a64(env)) {
        if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
            return false;
        }
        return env->pstate & PSTATE_PAN;
    } else {
        return env->uncached_cpsr & CPSR_PAN;
    }
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/*
 * Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/*
 * Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                               bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */
static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

#ifdef TARGET_AARCH64
/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */
static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
#endif
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /*
         * Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /*
         * For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    return (ARMMMUIdxBit_E10_1 |
            ARMMMUIdxBit_E10_1_PAN |
            ARMMMUIdxBit_E10_0 |
            ARMMMUIdxBit_Stage2 |
            ARMMMUIdxBit_Stage2_S);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}
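/*
 * The non-IS writers below all follow the same pattern, roughly:
 *
 *     if (tlb_force_broadcast(env)) {
 *         tlb_flush_all_cpus_synced(cs);   // behave like the IS variant
 *     } else {
 *         tlb_flush(cs);                   // local flush only
 *     }
 */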
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /*
     * Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /*
     * Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_CONTEXTIDR_EL1,
      .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /*
     * NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /*
     * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /*
     * Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /*
     * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /*
     * L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /*
     * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint64_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /*
             * VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}
static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /*
     * Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .fgt = FGT_CPACR_EL1,
      .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
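/*
 * The get_count()/ns_per_count() pair supports the delta scheme used by the
 * pmevcntr_op_start()/pmevcntr_op_finish() helpers below: while a counter is
 * running, the guest-visible value is get_count() minus a stored delta, and
 * ns_per_count() of the remaining headroom is used to schedule the overflow
 * timer.
 */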
static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    /* Precise instruction counting */
    return icount_enabled() == ICOUNT_PRECISE;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return icount_to_ns((int64_t)icount);
}
#endif
static bool pmuv3p1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
}

static bool pmuv3p4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}
[] = {
994 { .number
= 0x000, /* SW_INCR */
995 .supported
= event_always_supported
,
996 .get_count
= swinc_get_count
,
997 .ns_per_count
= swinc_ns_per
,
999 #ifndef CONFIG_USER_ONLY
1000 { .number
= 0x008, /* INST_RETIRED, Instruction architecturally executed */
1001 .supported
= instructions_supported
,
1002 .get_count
= instructions_get_count
,
1003 .ns_per_count
= instructions_ns_per
,
1005 { .number
= 0x011, /* CPU_CYCLES, Cycle */
1006 .supported
= event_always_supported
,
1007 .get_count
= cycles_get_count
,
1008 .ns_per_count
= cycles_ns_per
,
1011 { .number
= 0x023, /* STALL_FRONTEND */
1012 .supported
= pmuv3p1_events_supported
,
1013 .get_count
= zero_event_get_count
,
1014 .ns_per_count
= zero_event_ns_per
,
1016 { .number
= 0x024, /* STALL_BACKEND */
1017 .supported
= pmuv3p1_events_supported
,
1018 .get_count
= zero_event_get_count
,
1019 .ns_per_count
= zero_event_ns_per
,
1021 { .number
= 0x03c, /* STALL */
1022 .supported
= pmuv3p4_events_supported
,
1023 .get_count
= zero_event_get_count
,
1024 .ns_per_count
= zero_event_ns_per
,
1029 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1030 * events (i.e. the statistical profiling extension), this implementation
1031 * should first be updated to something sparse instead of the current
1032 * supported_event_map[] array.
1034 #define MAX_EVENT_ID 0x3c
1035 #define UNSUPPORTED_EVENT UINT16_MAX
1036 static uint16_t supported_event_map
[MAX_EVENT_ID
+ 1];
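/*
 * For example, once pmu_init() has run, supported_event_map[0x011] holds the
 * index of the CPU_CYCLES entry in pm_events[], while every unsupported event
 * number maps to UNSUPPORTED_EVENT.
 */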
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS \
    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
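/*
 * A caller changing one of these bits is expected to bracket the update,
 * e.g. (sketch):
 *
 *     pmu_op_start(env);
 *     env->cp15.mdcr_el2 = new_value;
 *     pmu_op_finish(env);
 *
 * so the counters are brought up to date under the old configuration and
 * the deltas and overflow timer are recomputed under the new one.
 */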
/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited = false, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2;
    uint8_t hpmn;

    /*
     * We might be called for M-profile cores where MDCR_EL2 doesn't
     * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
     * must be before we read that value.
     */
    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    mdcr_el2 = arm_mdcr_el2_eff(env);
    hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    /* Is event counting prohibited? */
    if (el == 2 && (counter < hpmn || counter == 31)) {
        prohibited = mdcr_el2 & MDCR_HPMD;
    }
    if (secure) {
        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (counter == 31) {
        /*
         * The cycle counter defaults to running. PMCR.DP says "disable
         * the cycle counter when event counting is prohibited".
         * Some MDCR bits disable the cycle counter specifically.
         */
        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            if (secure) {
                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
            }
            if (el == 2) {
                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
            }
        }
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
*env
)
1278 ARMCPU
*cpu
= env_archcpu(env
);
1279 qemu_set_irq(cpu
->pmu_interrupt
, (env
->cp15
.c9_pmcr
& PMCRE
) &&
1280 (env
->cp15
.c9_pminten
& env
->cp15
.c9_pmovsr
));
1283 static bool pmccntr_clockdiv_enabled(CPUARMState
*env
)
1286 * Return true if the clock divider is enabled and the cycle counter
1287 * is supposed to tick only once every 64 clock cycles. This is
1288 * controlled by PMCR.D, but if PMCR.LC is set to enable the long
1289 * (64-bit) cycle counter PMCR.D has no effect.
1291 return (env
->cp15
.c9_pmcr
& (PMCRD
| PMCRLC
)) == PMCRD
;
static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
{
    /* Return true if the specified event counter is configured to be 64 bit */

    /* This isn't intended to be used with the cycle counter */
    assert(counter < 31);

    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
         * current security state, so we don't use arm_mdcr_el2_eff() here.
         */
        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

        if (counter >= hpmn) {
            return hlp;
        }
    }
    return env->cp15.c9_pmcr & PMCRLP;
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (pmccntr_clockdiv_enabled(env)) {
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1ULL << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
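/*
 * In other words, while the counter is enabled:
 *     guest PMCCNTR = eff_cycles - c15_ccnt_delta
 * and pmccntr_op_finish() re-derives the delta from the (possibly modified)
 * guest value so that this relationship keeps holding.
 */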
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (pmccntr_clockdiv_enabled(env)) {
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
            1ULL << 63 : 1ULL << 31;

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
        int64_t overflow_in;

        if (!pmevcntr_is_64_bit(env, counter)) {
            delta = (uint32_t)delta;
        }
        overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t pmcr = env->cp15.c9_pmcr;

    /*
     * If EL2 is implemented and enabled for the current security state, reads
     * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
     */
    if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
        pmcr &= ~PMCRN_MASK;
        pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
    }

    return pmcr;
}
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    uint64_t overflow_mask, new_pmswinc;

    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            overflow_mask = pmevcntr_is_64_bit(env, i) ?
                1ULL << 63 : 1ULL << 31;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
*env
, const ARMCPRegInfo
*ri
)
1559 pmccntr_op_start(env
);
1560 ret
= env
->cp15
.c15_ccnt
;
1561 pmccntr_op_finish(env
);
1565 static void pmselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1569 * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1570 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1571 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1574 env
->cp15
.c9_pmselr
= value
& 0x1f;
1577 static void pmccntr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1580 pmccntr_op_start(env
);
1581 env
->cp15
.c15_ccnt
= value
;
1582 pmccntr_op_finish(env
);
1585 static void pmccntr_write32(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1588 uint64_t cur_val
= pmccntr_read(env
, NULL
);
1590 pmccntr_write(env
, ri
, deposit64(cur_val
, 0, 32, value
));
1593 static void pmccfiltr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1596 pmccntr_op_start(env
);
1597 env
->cp15
.pmccfiltr_el0
= value
& PMCCFILTR_EL0
;
1598 pmccntr_op_finish(env
);
1601 static void pmccfiltr_write_a32(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1604 pmccntr_op_start(env
);
1605 /* M is not accessible from AArch32 */
1606 env
->cp15
.pmccfiltr_el0
= (env
->cp15
.pmccfiltr_el0
& PMCCFILTR_M
) |
1607 (value
& PMCCFILTR
);
1608 pmccntr_op_finish(env
);
1611 static uint64_t pmccfiltr_read_a32(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1613 /* M is not visible in AArch32 */
1614 return env
->cp15
.pmccfiltr_el0
& PMCCFILTR
;
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
    pmu_op_finish(env);
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
    pmu_op_finish(env);
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
        value &= MAKE_64BIT_MASK(0, 32);
    }
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
            ret &= MAKE_64BIT_MASK(0, 32);
        }
        return ret;
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE.
         */
        return 0;
    }
}
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}
static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Only bits for implemented counters (and the C bit) can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /*
     * Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint64_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);
    uint64_t changed;

    /*
     * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
     * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
     * Instead, choose the format based on the mode of EL3.
     */
    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;      /* RES1 */
        valid_mask &= ~SCR_NET;        /* RES0 */

        if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu)) {
            value |= SCR_RW;           /* RAO/WI */
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        } else if (cpu_isar_feature(aa64_rme, cpu)) {
            /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
            value |= SCR_NS;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= SCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_doublefault, cpu)) {
            valid_mask |= SCR_EASE | SCR_NMEA;
        }
        if (cpu_isar_feature(aa64_sme, cpu)) {
            valid_mask |= SCR_ENTP2;
        }
        if (cpu_isar_feature(aa64_hcx, cpu)) {
            valid_mask |= SCR_HXEN;
        }
        if (cpu_isar_feature(aa64_fgt, cpu)) {
            valid_mask |= SCR_FGTEN;
        }
        if (cpu_isar_feature(aa64_rme, cpu)) {
            valid_mask |= SCR_NSE | SCR_GPF;
        }
        if (cpu_isar_feature(aa64_ecv, cpu)) {
            valid_mask |= SCR_ECVEN;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
        if (cpu_isar_feature(aa32_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /*
         * On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    changed = env->cp15.scr_el3 ^ value;
    env->cp15.scr_el3 = value;

    /*
     * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
     * we must invalidate all TLBs below EL3.
     */
    if (changed & (SCR_NS | SCR_NSE)) {
        tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
                                           ARMMMUIdxBit_E20_0 |
                                           ARMMMUIdxBit_E10_1 |
                                           ARMMMUIdxBit_E20_2 |
                                           ARMMMUIdxBit_E10_1_PAN |
                                           ARMMMUIdxBit_E20_2_PAN |
                                           ARMMMUIdxBit_E2));
    }
}

static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}
access_tid4(CPUARMState
*env
,
1982 const ARMCPRegInfo
*ri
,
1985 if (arm_current_el(env
) == 1 &&
1986 (arm_hcr_el2_eff(env
) & (HCR_TID2
| HCR_TID4
))) {
1987 return CP_ACCESS_TRAP_EL2
;
1990 return CP_ACCESS_OK
;
1993 static uint64_t ccsidr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1995 ARMCPU
*cpu
= env_archcpu(env
);
1998 * Acquire the CSSELR index from the bank corresponding to the CCSIDR
2001 uint32_t index
= A32_BANKED_REG_GET(env
, csselr
,
2002 ri
->secure
& ARM_CP_SECSTATE_S
);
2004 return cpu
->ccsidr
[index
];
2007 static void csselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
2010 raw_write(env
, ri
, value
& 0xf);
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
        if (cs->interrupt_request & CPU_INTERRUPT_VINMI) {
            ret |= ISR_IS;
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
        if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
            ret |= ISR_IS;
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
        if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) {
            ret |= ISR_FS;
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    if (hcr_el2 & HCR_AMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
            ret |= CPSR_A;
        }
    }

    return ret;
}
static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS | ARM_CP_IO },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .fgt = FGT_PMSWINC_EL0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .fgt = FGT_PMSWINC_EL0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fgt = FGT_PMSELR_EL0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fgt = FGT_PMSELR_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fgt = FGT_PMCCNTR_EL0,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .fgt = FGT_PMCCNTR_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCCFILTR_EL0,
      .type = ARM_CP_ALIAS | ARM_CP_IO },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCCFILTR_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0) },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .fgt = FGT_PMEVTYPERN_EL0,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .fgt = FGT_PMEVTYPERN_EL0,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .fgt = FGT_PMEVCNTRN_EL0,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .fgt = FGT_PMEVCNTRN_EL0,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .accessfn = access_tid4,
      .fgt = FGT_CCSIDR_EL1,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .accessfn = access_tid4,
      .fgt = FGT_CSSELR_EL1,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /*
     * Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .fgt = FGT_AIDR_EL1 },
    /*
     * Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AFSR0_EL1,
      .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_AFSR1_EL1,
      .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_MAIR_EL1,
      .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]) },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]) },
    /*
     * For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /*
     * MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
      .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
      .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
      .writefn = tlbimvaa_is_write },
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
     * at all, so we don't need to check whether we're v8A.
     */
    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TTEE)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return teecr_access(env, ri, isread);
}
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .writefn = teecr_write, .accessfn = teecr_access },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .fgt = FGT_TPIDR_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .fgt = FGT_TPIDR_EL0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fgt = FGT_TPIDRRO_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]) },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .fgt = FGT_TPIDRRO_EL0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .fgt = FGT_TPIDR_EL1,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) } },
};
static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /*
     * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint64_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (has_el2 && timeridx == GTIMER_PHYS &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        if (has_el2 && timeridx == GTIMER_VIRT) {
            if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        if (has_el2 && timeridx == GTIMER_PHYS) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        if (has_el2 && timeridx == GTIMER_VIRT) {
            if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /*
     * The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}
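
/*
 * For illustration, assuming a 62.5 MHz counter frequency:
 * gt_cntfrq_period_ns() is then 16 ns, so a QEMU_CLOCK_VIRTUAL reading of
 * 1,600,000 ns corresponds to a counter value of 100,000 ticks.
 */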
static void gt_update_irq(ARMCPU *cpu, int timeridx)
{
    CPUARMState *env = &cpu->env;
    uint64_t cnthctl = env->cp15.cnthctl_el2;
    ARMSecuritySpace ss = arm_security_space(env);
    /* ISTATUS && !IMASK */
    int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;

    /*
     * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
     * It is RES0 in Secure and NonSecure state.
     */
    if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
        ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) ||
         (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) {
        irqstate = 0;
    }

    qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    trace_arm_gt_update_irq(timeridx, irqstate);
}
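
/*
 * For illustration: CNT[PV]_CTL bit 0 is ENABLE, bit 1 is IMASK and bit 2
 * is ISTATUS, so "(ctl & 6) == 4" is true exactly when ISTATUS is set and
 * IMASK is clear; e.g. ctl = 0b101 asserts the timer output line while
 * ctl = 0b111 keeps it masked.
 */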
void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
{
    /*
     * Changing security state between Root and Secure/NonSecure, which may
     * happen when switching EL, can change the effective value of CNTHCTL_EL2
     * mask bits. Update the IRQ state accordingly.
     */
    gt_update_irq(cpu, GTIMER_VIRT);
    gt_update_irq(cpu, GTIMER_PHYS);
}
static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
{
    if ((env->cp15.scr_el3 & SCR_ECVEN) &&
        FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
        arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        return env->cp15.cntpoff_el2;
    }
    return 0;
}

static uint64_t gt_phys_cnt_offset(CPUARMState *env)
{
    if (arm_current_el(env) >= 2) {
        return 0;
    }
    return gt_phys_raw_cnt_offset(env);
}
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /*
         * Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
            cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env);
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        if (istatus) {
            /*
             * Next transition is when (count - offset) rolls back over to 0.
             * If offset > count then this is when count == offset;
             * if offset <= count then this is when count == offset + 2^64
             * For the latter case we set nexttick to an "as far in future
             * as possible" value and let the code below handle it.
             */
            if (offset > count) {
                nexttick = offset;
            } else {
                nexttick = UINT64_MAX;
            }
        } else {
            /*
             * Next transition is when (count - offset) == cval, i.e.
             * when count == (cval + offset).
             * If that would overflow, then again we set up the next interrupt
             * for "as far in the future as possible" for the code below.
             */
            if (uadd64_overflow(gt->cval, offset, &nexttick)) {
                nexttick = UINT64_MAX;
            }
        }
        /*
         * Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
    gt_update_irq(cpu, timeridx);
}
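
/*
 * Worked example, assuming offset = 0, count = 1000 ticks, cval = 5000 and
 * the timer enabled: ISTATUS is 0 (1000 < 5000) and nexttick becomes
 * cval + offset = 5000, so the QEMUTimer is programmed to fire when the
 * counter reaches 5000. When it does, this function runs again from the
 * timer callback, sets ISTATUS and (if IMASK is clear) gt_update_irq()
 * asserts the timer output line.
 */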
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_phys_cnt_offset(env);
}
uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    case GTIMER_PHYS:
        offset = gt_phys_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    case GTIMER_PHYS:
        offset = gt_phys_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
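
/*
 * For illustration: TVAL is a signed 32-bit downcounter view of CVAL.
 * Writing TVAL = 100 while the offset-adjusted counter is at 5000 sets
 * CVAL to 5100; a later TVAL read returns CVAL minus the current adjusted
 * counter, so the value counts down towards zero and goes negative after
 * the timer condition is met.
 */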
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /*
         * IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        trace_arm_gt_imask_toggle(timeridx);
        gt_update_irq(cpu, timeridx);
    }
}
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

static int gt_virt_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return GTIMER_HYPVIRT;
    default:
        return GTIMER_VIRT;
    }
}
static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.cnthctl_el2;
    uint32_t valid_mask =
        R_CNTHCTL_EL0PCTEN_E2H1_MASK |
        R_CNTHCTL_EL0VCTEN_E2H1_MASK |
        R_CNTHCTL_EVNTEN_MASK |
        R_CNTHCTL_EVNTDIR_MASK |
        R_CNTHCTL_EVNTI_MASK |
        R_CNTHCTL_EL0VTEN_MASK |
        R_CNTHCTL_EL0PTEN_MASK |
        R_CNTHCTL_EL1PCTEN_E2H1_MASK |
        R_CNTHCTL_EL1PTEN_MASK;

    if (cpu_isar_feature(aa64_rme, cpu)) {
        valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK;
    }
    if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
        valid_mask |=
            R_CNTHCTL_EL1TVT_MASK |
            R_CNTHCTL_EL1TVCT_MASK |
            R_CNTHCTL_EL1NVPCT_MASK |
            R_CNTHCTL_EL1NVVCT_MASK |
            R_CNTHCTL_EVNTIS_MASK;
    }
    if (cpu_isar_feature(aa64_ecv, cpu)) {
        valid_mask |= R_CNTHCTL_ECV_MASK;
    }

    /* Clear RES0 bits */
    value &= valid_mask;

    raw_write(env, ri, value);

    if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) {
        gt_update_irq(cpu, GTIMER_VIRT);
    } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) {
        gt_update_irq(cpu, GTIMER_PHYS);
    }
}
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /*
     * Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /*
     * Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
};
/*
 * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which
 * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
 * so our implementations here are identical to the normal registers.
 */
static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
    { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
};
static CPAccessResult gt_cntpoff_access(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ECVEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntpoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_PHYS);
}

static const ARMCPRegInfo gen_timer_cntpoff_reginfo = {
    .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
    .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
    .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write,
    .nv2_redirect_offset = 0x1a8,
    .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2),
};
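
/*
 * For illustration: CNTPOFF_EL2 plays the same role for the physical
 * counter that CNTVOFF_EL2 plays for the virtual one, but
 * gt_phys_raw_cnt_offset() above only applies it when FEAT_ECV is enabled
 * via SCR_EL3.ECVEN and CNTHCTL_EL2.ECV.
 */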
#else

/*
 * In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
};

/*
 * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it also
 * is exposed to userspace by Linux.
 */
static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
    { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /*
         * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                if (env->cp15.scr_el3 & SCR_EEL2) {
                    return CP_ACCESS_TRAP_EL2;
                }
                return CP_ACCESS_TRAP_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}

#ifdef CONFIG_TCG
*res
)
3581 * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
3582 * memory -- see pseudocode PAREncodeShareability().
3584 if (((res
->cacheattrs
.attrs
& 0xf0) == 0) ||
3585 res
->cacheattrs
.attrs
== 0x44 || res
->cacheattrs
.attrs
== 0x40) {
3588 return res
->cacheattrs
.shareability
;
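
/*
 * For illustration: in the MAIR-style attrs encoding used here, a zero
 * high nibble indicates Device memory and 0x44/0x40 are Normal
 * Non-cacheable encodings, so those cases report SH = 0b10 in PAR_EL1,
 * while other Normal memory keeps the shareability computed by
 * get_phys_addr().
 */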
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             ARMSecuritySpace ss)
{
    bool ret;
    uint64_t par64;
    bool format64 = false;
    ARMMMUFaultInfo fi = {};
    GetPhysAddrResult res = {};

    /*
     * I_MXTJT: Granule protection checks are not performed on the final
     * address of a successful translation. This is a translation not a
     * memory reference, so "memop = none = 0".
     */
    ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
                                         mmu_idx, ss, &res, &fi);

    /*
     * ATS operations only do S1 or S1+S2 translations, so we never
     * have to deal with the ARMCacheAttrs format for S2 only.
     */
    assert(!res.cacheattrs.is_s2_format);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1
            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                if (arm_is_secure_below_el3(env) && fi.s1ns) {
                    env->cp15.hpfar_el2 |= HPFAR_NS;
                }
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el, 0,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2) && !arm_aa32_secure_pl1_0(env)) {
            if (mmu_idx == ARMMMUIdx_E10_0 ||
                mmu_idx == ARMMMUIdx_E10_1 ||
                mmu_idx == ARMMMUIdx_E10_1_PAN) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= res.f.phys_addr & ~0xfffULL;
            if (!res.f.attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
            par64 |= par_el1_shareability(&res) << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /*
         * fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (res.f.lg_page_size == 24
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = res.f.phys_addr & 0xfffff000;
            }
            if (!res.f.attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
#endif /* CONFIG_TCG */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    ARMSecuritySpace ss = arm_security_space(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
        switch (el) {
        case 2:
            g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            /* fall through */
        case 3:
        case 1:
            if (ri->crm == 9 && arm_pan_enabled(env)) {
                mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
            } else {
                mmu_idx = ARMMMUIdx_Stage1_E1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_E10_0;
            break;
        case 2:
            g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        ss = ARMSS_NonSecure;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        ss = ARMSS_NonSecure;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx, ss);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    /* There is no SecureEL2 for AArch32. */
    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
                         ARMSS_NonSecure);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /*
     * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
     * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This
     * can only happen when executing at EL3 because that combination also
     * causes an illegal exception return. We don't need to check FEAT_RME
     * either, because scr_write() ensures that the NSE bit is not set
     * otherwise.
     */
    if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
3867 static CPAccessResult
at_s1e2_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3870 if (arm_current_el(env
) == 3 &&
3871 !(env
->cp15
.scr_el3
& (SCR_NS
| SCR_EEL2
))) {
3872 return CP_ACCESS_TRAP
;
3874 return at_e012_access(env
, ri
, isread
);
3877 static CPAccessResult
at_s1e01_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3880 if (arm_current_el(env
) == 1 && (arm_hcr_el2_eff(env
) & HCR_AT
)) {
3881 return CP_ACCESS_TRAP_EL2
;
3883 return at_e012_access(env
, ri
, isread
);
3886 static void ats_write64(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3890 MMUAccessType access_type
= ri
->opc2
& 1 ? MMU_DATA_STORE
: MMU_DATA_LOAD
;
3892 uint64_t hcr_el2
= arm_hcr_el2_eff(env
);
3893 bool regime_e20
= (hcr_el2
& (HCR_E2H
| HCR_TGE
)) == (HCR_E2H
| HCR_TGE
);
3894 bool for_el3
= false;
3895 ARMSecuritySpace ss
;
3897 switch (ri
->opc2
& 6) {
3900 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3901 if (ri
->crm
== 9 && arm_pan_enabled(env
)) {
3902 mmu_idx
= regime_e20
?
3903 ARMMMUIdx_E20_2_PAN
: ARMMMUIdx_Stage1_E1_PAN
;
3905 mmu_idx
= regime_e20
? ARMMMUIdx_E20_2
: ARMMMUIdx_Stage1_E1
;
3908 case 4: /* AT S1E2R, AT S1E2W */
3909 mmu_idx
= hcr_el2
& HCR_E2H
? ARMMMUIdx_E20_2
: ARMMMUIdx_E2
;
3911 case 6: /* AT S1E3R, AT S1E3W */
3912 mmu_idx
= ARMMMUIdx_E3
;
3916 g_assert_not_reached();
3919 case 2: /* AT S1E0R, AT S1E0W */
3920 mmu_idx
= regime_e20
? ARMMMUIdx_E20_0
: ARMMMUIdx_Stage1_E0
;
3922 case 4: /* AT S12E1R, AT S12E1W */
3923 mmu_idx
= regime_e20
? ARMMMUIdx_E20_2
: ARMMMUIdx_E10_1
;
3925 case 6: /* AT S12E0R, AT S12E0W */
3926 mmu_idx
= regime_e20
? ARMMMUIdx_E20_0
: ARMMMUIdx_E10_0
;
3929 g_assert_not_reached();
3932 ss
= for_el3
? arm_security_space(env
) : arm_security_space_below_el3(env
);
3933 env
->cp15
.par_el
[1] = do_ats_write(env
, value
, access_type
, mmu_idx
, ss
);
3935 /* Handled by hardware accelerator. */
3936 g_assert_not_reached();
3937 #endif /* CONFIG_TCG */
3941 /* Return basic MPU access permission bits. */
3942 static uint32_t simple_mpu_ap_bits(uint32_t val
)
3949 for (i
= 0; i
< 16; i
+= 2) {
3950 ret
|= (val
>> i
) & mask
;
3956 /* Pad basic MPU access permission bits to extended format. */
3957 static uint32_t extended_mpu_ap_bits(uint32_t val
)
3964 for (i
= 0; i
< 16; i
+= 2) {
3965 ret
|= (val
& mask
) << i
;
3971 static void pmsav5_data_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3974 env
->cp15
.pmsav5_data_ap
= extended_mpu_ap_bits(value
);
3977 static uint64_t pmsav5_data_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3979 return simple_mpu_ap_bits(env
->cp15
.pmsav5_data_ap
);
3982 static void pmsav5_insn_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
3985 env
->cp15
.pmsav5_insn_ap
= extended_mpu_ap_bits(value
);
3988 static uint64_t pmsav5_insn_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3990 return simple_mpu_ap_bits(env
->cp15
.pmsav5_insn_ap
);
3993 static uint64_t pmsav7_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
3995 uint32_t *u32p
= *(uint32_t **)raw_ptr(env
, ri
);
4001 u32p
+= env
->pmsav7
.rnr
[M_REG_NS
];
4005 static void pmsav7_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4008 ARMCPU
*cpu
= env_archcpu(env
);
4009 uint32_t *u32p
= *(uint32_t **)raw_ptr(env
, ri
);
4015 u32p
+= env
->pmsav7
.rnr
[M_REG_NS
];
4016 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
4020 static void pmsav7_rgnr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4023 ARMCPU
*cpu
= env_archcpu(env
);
4024 uint32_t nrgs
= cpu
->pmsav7_dregion
;
4026 if (value
>= nrgs
) {
4027 qemu_log_mask(LOG_GUEST_ERROR
,
4028 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
4029 " > %" PRIu32
"\n", (uint32_t)value
, nrgs
);
4033 raw_write(env
, ri
, value
);
4036 static void prbar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4039 ARMCPU
*cpu
= env_archcpu(env
);
4041 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
4042 env
->pmsav8
.rbar
[M_REG_NS
][env
->pmsav7
.rnr
[M_REG_NS
]] = value
;
4045 static uint64_t prbar_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4047 return env
->pmsav8
.rbar
[M_REG_NS
][env
->pmsav7
.rnr
[M_REG_NS
]];
4050 static void prlar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4053 ARMCPU
*cpu
= env_archcpu(env
);
4055 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
4056 env
->pmsav8
.rlar
[M_REG_NS
][env
->pmsav7
.rnr
[M_REG_NS
]] = value
;
4059 static uint64_t prlar_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4061 return env
->pmsav8
.rlar
[M_REG_NS
][env
->pmsav7
.rnr
[M_REG_NS
]];
4064 static void prselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4067 ARMCPU
*cpu
= env_archcpu(env
);
4070 * Ignore writes that would select not implemented region.
4071 * This is architecturally UNPREDICTABLE.
4073 if (value
>= cpu
->pmsav7_dregion
) {
4077 env
->pmsav7
.rnr
[M_REG_NS
] = value
;
4080 static void hprbar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4083 ARMCPU
*cpu
= env_archcpu(env
);
4085 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
4086 env
->pmsav8
.hprbar
[env
->pmsav8
.hprselr
] = value
;
4089 static uint64_t hprbar_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4091 return env
->pmsav8
.hprbar
[env
->pmsav8
.hprselr
];
4094 static void hprlar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4097 ARMCPU
*cpu
= env_archcpu(env
);
4099 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
4100 env
->pmsav8
.hprlar
[env
->pmsav8
.hprselr
] = value
;
4103 static uint64_t hprlar_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4105 return env
->pmsav8
.hprlar
[env
->pmsav8
.hprselr
];
4108 static void hprenr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4113 ARMCPU
*cpu
= env_archcpu(env
);
4115 /* Ignore writes to unimplemented regions */
4116 int rmax
= MIN(cpu
->pmsav8r_hdregion
, 32);
4117 value
&= MAKE_64BIT_MASK(0, rmax
);
4119 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
4121 /* Register alias is only valid for first 32 indexes */
4122 for (n
= 0; n
< rmax
; ++n
) {
4123 bit
= extract32(value
, n
, 1);
4124 env
->pmsav8
.hprlar
[n
] = deposit32(
4125 env
->pmsav8
.hprlar
[n
], 0, 1, bit
);
4129 static uint64_t hprenr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4132 uint32_t result
= 0x0;
4133 ARMCPU
*cpu
= env_archcpu(env
);
4135 /* Register alias is only valid for first 32 indexes */
4136 for (n
= 0; n
< MIN(cpu
->pmsav8r_hdregion
, 32); ++n
) {
4137 if (env
->pmsav8
.hprlar
[n
] & 0x1) {
4138 result
|= (0x1 << n
);
4144 static void hprselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4147 ARMCPU
*cpu
= env_archcpu(env
);
4150 * Ignore writes that would select not implemented region.
4151 * This is architecturally UNPREDICTABLE.
4153 if (value
>= cpu
->pmsav8r_hdregion
) {
4157 env
->pmsav8
.hprselr
= value
;
4160 static void pmsav8r_regn_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4163 ARMCPU
*cpu
= env_archcpu(env
);
4164 uint8_t index
= (extract32(ri
->opc0
, 0, 1) << 4) |
4165 (extract32(ri
->crm
, 0, 3) << 1) | extract32(ri
->opc2
, 2, 1);
4167 tlb_flush(CPU(cpu
)); /* Mappings may have changed - purge! */
4170 if (index
>= cpu
->pmsav8r_hdregion
) {
4173 if (ri
->opc2
& 0x1) {
4174 env
->pmsav8
.hprlar
[index
] = value
;
4176 env
->pmsav8
.hprbar
[index
] = value
;
4179 if (index
>= cpu
->pmsav7_dregion
) {
4182 if (ri
->opc2
& 0x1) {
4183 env
->pmsav8
.rlar
[M_REG_NS
][index
] = value
;
4185 env
->pmsav8
.rbar
[M_REG_NS
][index
] = value
;
4190 static uint64_t pmsav8r_regn_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4192 ARMCPU
*cpu
= env_archcpu(env
);
4193 uint8_t index
= (extract32(ri
->opc0
, 0, 1) << 4) |
4194 (extract32(ri
->crm
, 0, 3) << 1) | extract32(ri
->opc2
, 2, 1);
4197 if (index
>= cpu
->pmsav8r_hdregion
) {
4200 if (ri
->opc2
& 0x1) {
4201 return env
->pmsav8
.hprlar
[index
];
4203 return env
->pmsav8
.hprbar
[index
];
4206 if (index
>= cpu
->pmsav7_dregion
) {
4209 if (ri
->opc2
& 0x1) {
4210 return env
->pmsav8
.rlar
[M_REG_NS
][index
];
4212 return env
->pmsav8
.rbar
[M_REG_NS
][index
];
4217 static const ARMCPRegInfo pmsav8r_cp_reginfo
[] = {
4219 .cp
= 15, .opc1
= 0, .crn
= 6, .crm
= 3, .opc2
= 0,
4220 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
4221 .accessfn
= access_tvm_trvm
,
4222 .readfn
= prbar_read
, .writefn
= prbar_write
},
4224 .cp
= 15, .opc1
= 0, .crn
= 6, .crm
= 3, .opc2
= 1,
4225 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
4226 .accessfn
= access_tvm_trvm
,
4227 .readfn
= prlar_read
, .writefn
= prlar_write
},
4228 { .name
= "PRSELR", .resetvalue
= 0,
4229 .cp
= 15, .opc1
= 0, .crn
= 6, .crm
= 2, .opc2
= 1,
4230 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4231 .writefn
= prselr_write
,
4232 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.rnr
[M_REG_NS
]) },
4233 { .name
= "HPRBAR", .resetvalue
= 0,
4234 .cp
= 15, .opc1
= 4, .crn
= 6, .crm
= 3, .opc2
= 0,
4235 .access
= PL2_RW
, .type
= ARM_CP_NO_RAW
,
4236 .readfn
= hprbar_read
, .writefn
= hprbar_write
},
4238 .cp
= 15, .opc1
= 4, .crn
= 6, .crm
= 3, .opc2
= 1,
4239 .access
= PL2_RW
, .type
= ARM_CP_NO_RAW
,
4240 .readfn
= hprlar_read
, .writefn
= hprlar_write
},
4241 { .name
= "HPRSELR", .resetvalue
= 0,
4242 .cp
= 15, .opc1
= 4, .crn
= 6, .crm
= 2, .opc2
= 1,
4244 .writefn
= hprselr_write
,
4245 .fieldoffset
= offsetof(CPUARMState
, pmsav8
.hprselr
) },
4247 .cp
= 15, .opc1
= 4, .crn
= 6, .crm
= 1, .opc2
= 1,
4248 .access
= PL2_RW
, .type
= ARM_CP_NO_RAW
,
4249 .readfn
= hprenr_read
, .writefn
= hprenr_write
},
4252 static const ARMCPRegInfo pmsav7_cp_reginfo
[] = {
4254 * Reset for all these registers is handled in arm_cpu_reset(),
4255 * because the PMSAv7 is also used by M-profile CPUs, which do
4256 * not register cpregs but still need the state to be reset.
4258 { .name
= "DRBAR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 0,
4259 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
4260 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.drbar
),
4261 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
,
4262 .resetfn
= arm_cp_reset_ignore
},
4263 { .name
= "DRSR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 2,
4264 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
4265 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.drsr
),
4266 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
,
4267 .resetfn
= arm_cp_reset_ignore
},
4268 { .name
= "DRACR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 1, .opc2
= 4,
4269 .access
= PL1_RW
, .type
= ARM_CP_NO_RAW
,
4270 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.dracr
),
4271 .readfn
= pmsav7_read
, .writefn
= pmsav7_write
,
4272 .resetfn
= arm_cp_reset_ignore
},
4273 { .name
= "RGNR", .cp
= 15, .crn
= 6, .opc1
= 0, .crm
= 2, .opc2
= 0,
4275 .fieldoffset
= offsetof(CPUARMState
, pmsav7
.rnr
[M_REG_NS
]),
4276 .writefn
= pmsav7_rgnr_write
,
4277 .resetfn
= arm_cp_reset_ignore
},
4280 static const ARMCPRegInfo pmsav5_cp_reginfo
[] = {
4281 { .name
= "DATA_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 0,
4282 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
,
4283 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_data_ap
),
4284 .readfn
= pmsav5_data_ap_read
, .writefn
= pmsav5_data_ap_write
, },
4285 { .name
= "INSN_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 1,
4286 .access
= PL1_RW
, .type
= ARM_CP_ALIAS
,
4287 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_insn_ap
),
4288 .readfn
= pmsav5_insn_ap_read
, .writefn
= pmsav5_insn_ap_write
, },
4289 { .name
= "DATA_EXT_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 2,
4291 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_data_ap
),
4293 { .name
= "INSN_EXT_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 3,
4295 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmsav5_insn_ap
),
4297 { .name
= "DCACHE_CFG", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 0,
4299 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_data
), .resetvalue
= 0, },
4300 { .name
= "ICACHE_CFG", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 1,
4302 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_insn
), .resetvalue
= 0, },
4303 /* Protection region base and size registers */
4304 { .name
= "946_PRBS0", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0,
4305 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
4306 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[0]) },
4307 { .name
= "946_PRBS1", .cp
= 15, .crn
= 6, .crm
= 1, .opc1
= 0,
4308 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
4309 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[1]) },
4310 { .name
= "946_PRBS2", .cp
= 15, .crn
= 6, .crm
= 2, .opc1
= 0,
4311 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
4312 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[2]) },
4313 { .name
= "946_PRBS3", .cp
= 15, .crn
= 6, .crm
= 3, .opc1
= 0,
4314 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
4315 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[3]) },
4316 { .name
= "946_PRBS4", .cp
= 15, .crn
= 6, .crm
= 4, .opc1
= 0,
4317 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
4318 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[4]) },
4319 { .name
= "946_PRBS5", .cp
= 15, .crn
= 6, .crm
= 5, .opc1
= 0,
4320 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
4321 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[5]) },
4322 { .name
= "946_PRBS6", .cp
= 15, .crn
= 6, .crm
= 6, .opc1
= 0,
4323 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
4324 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[6]) },
4325 { .name
= "946_PRBS7", .cp
= 15, .crn
= 6, .crm
= 7, .opc1
= 0,
4326 .opc2
= CP_ANY
, .access
= PL1_RW
, .resetvalue
= 0,
4327 .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_region
[7]) },
4330 static void vmsa_ttbcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4333 ARMCPU
*cpu
= env_archcpu(env
);
4335 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
4336 if (arm_feature(env
, ARM_FEATURE_LPAE
) && (value
& TTBCR_EAE
)) {
4338 * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
4339 * using Long-descriptor translation table format
4341 value
&= ~((7 << 19) | (3 << 14) | (0xf << 3));
4342 } else if (arm_feature(env
, ARM_FEATURE_EL3
)) {
4344 * In an implementation that includes the Security Extensions
4345 * TTBCR has additional fields PD0 [4] and PD1 [5] for
4346 * Short-descriptor translation table format.
4348 value
&= TTBCR_PD1
| TTBCR_PD0
| TTBCR_N
;
4354 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
4356 * With LPAE the TTBCR could result in a change of ASID
4357 * via the TTBCR.A1 bit, so do a TLB flush.
4359 tlb_flush(CPU(cpu
));
4361 raw_write(env
, ri
, value
);
4364 static void vmsa_tcr_el12_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4367 ARMCPU
*cpu
= env_archcpu(env
);
4369 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
4370 tlb_flush(CPU(cpu
));
4371 raw_write(env
, ri
, value
);
4374 static void vmsa_ttbr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4377 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
4378 if (cpreg_field_is_64bit(ri
) &&
4379 extract64(raw_read(env
, ri
) ^ value
, 48, 16) != 0) {
4380 ARMCPU
*cpu
= env_archcpu(env
);
4381 tlb_flush(CPU(cpu
));
4383 raw_write(env
, ri
, value
);
4386 static void vmsa_tcr_ttbr_el2_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4390 * If we are running with E2&0 regime, then an ASID is active.
4391 * Flush if that might be changing. Note we're not checking
4392 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
4393 * holds the active ASID, only checking the field that might.
4395 if (extract64(raw_read(env
, ri
) ^ value
, 48, 16) &&
4396 (arm_hcr_el2_eff(env
) & HCR_E2H
)) {
4397 uint16_t mask
= ARMMMUIdxBit_E20_2
|
4398 ARMMMUIdxBit_E20_2_PAN
|
4400 tlb_flush_by_mmuidx(env_cpu(env
), mask
);
4402 raw_write(env
, ri
, value
);
4405 static void vttbr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4408 ARMCPU
*cpu
= env_archcpu(env
);
4409 CPUState
*cs
= CPU(cpu
);
4412 * A change in VMID to the stage2 page table (Stage2) invalidates
4413 * the stage2 and combined stage 1&2 tlbs (EL10_1 and EL10_0).
4415 if (extract64(raw_read(env
, ri
) ^ value
, 48, 16) != 0) {
4416 tlb_flush_by_mmuidx(cs
, alle1_tlbmask(env
));
4418 raw_write(env
, ri
, value
);
4421 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo
[] = {
4422 { .name
= "DFSR", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 0,
4423 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
, .type
= ARM_CP_ALIAS
,
4424 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.dfsr_s
),
4425 offsetoflow32(CPUARMState
, cp15
.dfsr_ns
) }, },
4426 { .name
= "IFSR", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 1,
4427 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
, .resetvalue
= 0,
4428 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.ifsr_s
),
4429 offsetoflow32(CPUARMState
, cp15
.ifsr_ns
) } },
4430 { .name
= "DFAR", .cp
= 15, .opc1
= 0, .crn
= 6, .crm
= 0, .opc2
= 0,
4431 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
, .resetvalue
= 0,
4432 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.dfar_s
),
4433 offsetof(CPUARMState
, cp15
.dfar_ns
) } },
4434 { .name
= "FAR_EL1", .state
= ARM_CP_STATE_AA64
,
4435 .opc0
= 3, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 0,
4436 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4438 .nv2_redirect_offset
= 0x220 | NV2_REDIR_NV1
,
4439 .fieldoffset
= offsetof(CPUARMState
, cp15
.far_el
[1]),
4443 static const ARMCPRegInfo vmsa_cp_reginfo
[] = {
4444 { .name
= "ESR_EL1", .state
= ARM_CP_STATE_AA64
,
4445 .opc0
= 3, .crn
= 5, .crm
= 2, .opc1
= 0, .opc2
= 0,
4446 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4448 .nv2_redirect_offset
= 0x138 | NV2_REDIR_NV1
,
4449 .fieldoffset
= offsetof(CPUARMState
, cp15
.esr_el
[1]), .resetvalue
= 0, },
4450 { .name
= "TTBR0_EL1", .state
= ARM_CP_STATE_BOTH
,
4451 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 0, .opc2
= 0,
4452 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4453 .fgt
= FGT_TTBR0_EL1
,
4454 .nv2_redirect_offset
= 0x200 | NV2_REDIR_NV1
,
4455 .writefn
= vmsa_ttbr_write
, .resetvalue
= 0, .raw_writefn
= raw_write
,
4456 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr0_s
),
4457 offsetof(CPUARMState
, cp15
.ttbr0_ns
) } },
4458 { .name
= "TTBR1_EL1", .state
= ARM_CP_STATE_BOTH
,
4459 .opc0
= 3, .opc1
= 0, .crn
= 2, .crm
= 0, .opc2
= 1,
4460 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4461 .fgt
= FGT_TTBR1_EL1
,
4462 .nv2_redirect_offset
= 0x210 | NV2_REDIR_NV1
,
4463 .writefn
= vmsa_ttbr_write
, .resetvalue
= 0, .raw_writefn
= raw_write
,
4464 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr1_s
),
4465 offsetof(CPUARMState
, cp15
.ttbr1_ns
) } },
4466 { .name
= "TCR_EL1", .state
= ARM_CP_STATE_AA64
,
4467 .opc0
= 3, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 2,
4468 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4470 .nv2_redirect_offset
= 0x120 | NV2_REDIR_NV1
,
4471 .writefn
= vmsa_tcr_el12_write
,
4472 .raw_writefn
= raw_write
,
4474 .fieldoffset
= offsetof(CPUARMState
, cp15
.tcr_el
[1]) },
4475 { .name
= "TTBCR", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 2,
4476 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4477 .type
= ARM_CP_ALIAS
, .writefn
= vmsa_ttbcr_write
,
4478 .raw_writefn
= raw_write
,
4479 .bank_fieldoffsets
= { offsetoflow32(CPUARMState
, cp15
.tcr_el
[3]),
4480 offsetoflow32(CPUARMState
, cp15
.tcr_el
[1])} },
4484 * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
4485 * qemu tlbs nor adjusting cached masks.
4487 static const ARMCPRegInfo ttbcr2_reginfo
= {
4488 .name
= "TTBCR2", .cp
= 15, .opc1
= 0, .crn
= 2, .crm
= 0, .opc2
= 3,
4489 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4490 .type
= ARM_CP_ALIAS
,
4491 .bank_fieldoffsets
= {
4492 offsetofhigh32(CPUARMState
, cp15
.tcr_el
[3]),
4493 offsetofhigh32(CPUARMState
, cp15
.tcr_el
[1]),
4497 static void omap_ticonfig_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4500 env
->cp15
.c15_ticonfig
= value
& 0xe7;
4501 /* The OS_TYPE bit in this register changes the reported CPUID! */
4502 env
->cp15
.c0_cpuid
= (value
& (1 << 5)) ?
4503 ARM_CPUID_TI915T
: ARM_CPUID_TI925T
;
4506 static void omap_threadid_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4509 env
->cp15
.c15_threadid
= value
& 0xffff;
4512 static void omap_wfi_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4515 /* Wait-for-interrupt (deprecated) */
4516 cpu_interrupt(env_cpu(env
), CPU_INTERRUPT_HALT
);
4519 static void omap_cachemaint_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4523 * On OMAP there are registers indicating the max/min index of dcache lines
4524 * containing a dirty line; cache flush operations have to reset these.
4526 env
->cp15
.c15_i_max
= 0x000;
4527 env
->cp15
.c15_i_min
= 0xff0;
4530 static const ARMCPRegInfo omap_cp_reginfo
[] = {
4531 { .name
= "DFSR", .cp
= 15, .crn
= 5, .crm
= CP_ANY
,
4532 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_OVERRIDE
,
4533 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.esr_el
[1]),
4535 { .name
= "", .cp
= 15, .crn
= 15, .crm
= 0, .opc1
= 0, .opc2
= 0,
4536 .access
= PL1_RW
, .type
= ARM_CP_NOP
},
4537 { .name
= "TICONFIG", .cp
= 15, .crn
= 15, .crm
= 1, .opc1
= 0, .opc2
= 0,
4539 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_ticonfig
), .resetvalue
= 0,
4540 .writefn
= omap_ticonfig_write
},
4541 { .name
= "IMAX", .cp
= 15, .crn
= 15, .crm
= 2, .opc1
= 0, .opc2
= 0,
4543 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_i_max
), .resetvalue
= 0, },
4544 { .name
= "IMIN", .cp
= 15, .crn
= 15, .crm
= 3, .opc1
= 0, .opc2
= 0,
4545 .access
= PL1_RW
, .resetvalue
= 0xff0,
4546 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_i_min
) },
4547 { .name
= "THREADID", .cp
= 15, .crn
= 15, .crm
= 4, .opc1
= 0, .opc2
= 0,
4549 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_threadid
), .resetvalue
= 0,
4550 .writefn
= omap_threadid_write
},
4551 { .name
= "TI925T_STATUS", .cp
= 15, .crn
= 15,
4552 .crm
= 8, .opc1
= 0, .opc2
= 0, .access
= PL1_RW
,
4553 .type
= ARM_CP_NO_RAW
,
4554 .readfn
= arm_cp_read_zero
, .writefn
= omap_wfi_write
, },
4556 * TODO: Peripheral port remap register:
4557 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
4558 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
4561 { .name
= "OMAP_CACHEMAINT", .cp
= 15, .crn
= 7, .crm
= CP_ANY
,
4562 .opc1
= 0, .opc2
= CP_ANY
, .access
= PL1_W
,
4563 .type
= ARM_CP_OVERRIDE
| ARM_CP_NO_RAW
,
4564 .writefn
= omap_cachemaint_write
},
4565 { .name
= "C9", .cp
= 15, .crn
= 9,
4566 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
,
4567 .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
, .resetvalue
= 0 },
4570 static void xscale_cpar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4573 env
->cp15
.c15_cpar
= value
& 0x3fff;
4576 static const ARMCPRegInfo xscale_cp_reginfo
[] = {
4577 { .name
= "XSCALE_CPAR",
4578 .cp
= 15, .crn
= 15, .crm
= 1, .opc1
= 0, .opc2
= 0, .access
= PL1_RW
,
4579 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_cpar
), .resetvalue
= 0,
4580 .writefn
= xscale_cpar_write
, },
4581 { .name
= "XSCALE_AUXCR",
4582 .cp
= 15, .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 1, .access
= PL1_RW
,
4583 .fieldoffset
= offsetof(CPUARMState
, cp15
.c1_xscaleauxcr
),
4586 * XScale specific cache-lockdown: since we have no cache we NOP these
4587 * and hope the guest does not really rely on cache behaviour.
4589 { .name
= "XSCALE_LOCK_ICACHE_LINE",
4590 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 1, .opc2
= 0,
4591 .access
= PL1_W
, .type
= ARM_CP_NOP
},
4592 { .name
= "XSCALE_UNLOCK_ICACHE",
4593 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 1, .opc2
= 1,
4594 .access
= PL1_W
, .type
= ARM_CP_NOP
},
4595 { .name
= "XSCALE_DCACHE_LOCK",
4596 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 2, .opc2
= 0,
4597 .access
= PL1_RW
, .type
= ARM_CP_NOP
},
4598 { .name
= "XSCALE_UNLOCK_DCACHE",
4599 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 2, .opc2
= 1,
4600 .access
= PL1_W
, .type
= ARM_CP_NOP
},
4603 static const ARMCPRegInfo dummy_c15_cp_reginfo
[] = {
4605 * RAZ/WI the whole crn=15 space, when we don't have a more specific
4606 * implementation of this implementation-defined space.
4607 * Ideally this should eventually disappear in favour of actually
4608 * implementing the correct behaviour for all cores.
4610 { .name
= "C15_IMPDEF", .cp
= 15, .crn
= 15,
4611 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
4613 .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
| ARM_CP_OVERRIDE
,
4617 static const ARMCPRegInfo cache_dirty_status_cp_reginfo
[] = {
4618 /* Cache status: RAZ because we have no cache so it's always clean */
4619 { .name
= "CDSR", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 6,
4620 .access
= PL1_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
4624 static const ARMCPRegInfo cache_block_ops_cp_reginfo
[] = {
4625 /* We never have a block transfer operation in progress */
4626 { .name
= "BXSR", .cp
= 15, .crn
= 7, .crm
= 12, .opc1
= 0, .opc2
= 4,
4627 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
4629 /* The cache ops themselves: these all NOP for QEMU */
4630 { .name
= "IICR", .cp
= 15, .crm
= 5, .opc1
= 0,
4631 .access
= PL1_W
, .type
= ARM_CP_NOP
| ARM_CP_64BIT
},
4632 { .name
= "IDCR", .cp
= 15, .crm
= 6, .opc1
= 0,
4633 .access
= PL1_W
, .type
= ARM_CP_NOP
| ARM_CP_64BIT
},
4634 { .name
= "CDCR", .cp
= 15, .crm
= 12, .opc1
= 0,
4635 .access
= PL0_W
, .type
= ARM_CP_NOP
| ARM_CP_64BIT
},
4636 { .name
= "PIR", .cp
= 15, .crm
= 12, .opc1
= 1,
4637 .access
= PL0_W
, .type
= ARM_CP_NOP
| ARM_CP_64BIT
},
4638 { .name
= "PDR", .cp
= 15, .crm
= 12, .opc1
= 2,
4639 .access
= PL0_W
, .type
= ARM_CP_NOP
| ARM_CP_64BIT
},
4640 { .name
= "CIDCR", .cp
= 15, .crm
= 14, .opc1
= 0,
4641 .access
= PL1_W
, .type
= ARM_CP_NOP
| ARM_CP_64BIT
},
4644 static const ARMCPRegInfo cache_test_clean_cp_reginfo
[] = {
4646 * The cache test-and-clean instructions always return (1 << 30)
4647 * to indicate that there are no dirty cache lines.
4649 { .name
= "TC_DCACHE", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 3,
4650 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
4651 .resetvalue
= (1 << 30) },
4652 { .name
= "TCI_DCACHE", .cp
= 15, .crn
= 7, .crm
= 14, .opc1
= 0, .opc2
= 3,
4653 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_RAW
,
4654 .resetvalue
= (1 << 30) },
4657 static const ARMCPRegInfo strongarm_cp_reginfo
[] = {
4658 /* Ignore ReadBuffer accesses */
4659 { .name
= "C9_READBUFFER", .cp
= 15, .crn
= 9,
4660 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
4661 .access
= PL1_RW
, .resetvalue
= 0,
4662 .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
| ARM_CP_NO_RAW
},
4665 static uint64_t midr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4667 unsigned int cur_el
= arm_current_el(env
);
4669 if (arm_is_el2_enabled(env
) && cur_el
== 1) {
4670 return env
->cp15
.vpidr_el2
;
4672 return raw_read(env
, ri
);
4675 static uint64_t mpidr_read_val(CPUARMState
*env
)
4677 ARMCPU
*cpu
= env_archcpu(env
);
4678 uint64_t mpidr
= cpu
->mp_affinity
;
4680 if (arm_feature(env
, ARM_FEATURE_V7MP
)) {
4681 mpidr
|= (1U << 31);
4683 * Cores which are uniprocessor (non-coherent)
4684 * but still implement the MP extensions set
4685 * bit 30. (For instance, Cortex-R5).
4687 if (cpu
->mp_is_up
) {
4688 mpidr
|= (1u << 30);
4694 static uint64_t mpidr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4696 unsigned int cur_el
= arm_current_el(env
);
4698 if (arm_is_el2_enabled(env
) && cur_el
== 1) {
4699 return env
->cp15
.vmpidr_el2
;
4701 return mpidr_read_val(env
);
4704 static const ARMCPRegInfo lpae_cp_reginfo
[] = {
4706 { .name
= "AMAIR0", .state
= ARM_CP_STATE_BOTH
,
4707 .opc0
= 3, .crn
= 10, .crm
= 3, .opc1
= 0, .opc2
= 0,
4708 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4709 .fgt
= FGT_AMAIR_EL1
,
4710 .nv2_redirect_offset
= 0x148 | NV2_REDIR_NV1
,
4711 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
4712 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4713 { .name
= "AMAIR1", .cp
= 15, .crn
= 10, .crm
= 3, .opc1
= 0, .opc2
= 1,
4714 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4715 .type
= ARM_CP_CONST
, .resetvalue
= 0 },
4716 { .name
= "PAR", .cp
= 15, .crm
= 7, .opc1
= 0,
4717 .access
= PL1_RW
, .type
= ARM_CP_64BIT
, .resetvalue
= 0,
4718 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.par_s
),
4719 offsetof(CPUARMState
, cp15
.par_ns
)} },
4720 { .name
= "TTBR0", .cp
= 15, .crm
= 2, .opc1
= 0,
4721 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4722 .type
= ARM_CP_64BIT
| ARM_CP_ALIAS
,
4723 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr0_s
),
4724 offsetof(CPUARMState
, cp15
.ttbr0_ns
) },
4725 .writefn
= vmsa_ttbr_write
, .raw_writefn
= raw_write
},
4726 { .name
= "TTBR1", .cp
= 15, .crm
= 2, .opc1
= 1,
4727 .access
= PL1_RW
, .accessfn
= access_tvm_trvm
,
4728 .type
= ARM_CP_64BIT
| ARM_CP_ALIAS
,
4729 .bank_fieldoffsets
= { offsetof(CPUARMState
, cp15
.ttbr1_s
),
4730 offsetof(CPUARMState
, cp15
.ttbr1_ns
) },
4731 .writefn
= vmsa_ttbr_write
, .raw_writefn
= raw_write
},
4734 static uint64_t aa64_fpcr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4736 return vfp_get_fpcr(env
);
4739 static void aa64_fpcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4742 vfp_set_fpcr(env
, value
);
4745 static uint64_t aa64_fpsr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4747 return vfp_get_fpsr(env
);
4750 static void aa64_fpsr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4753 vfp_set_fpsr(env
, value
);
4756 static CPAccessResult
aa64_daif_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4759 if (arm_current_el(env
) == 0 && !(arm_sctlr(env
, 0) & SCTLR_UMA
)) {
4760 return CP_ACCESS_TRAP
;
4762 return CP_ACCESS_OK
;
4765 static void aa64_daif_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4768 env
->daif
= value
& PSTATE_DAIF
;
4771 static uint64_t aa64_pan_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4773 return env
->pstate
& PSTATE_PAN
;
4776 static void aa64_pan_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4779 env
->pstate
= (env
->pstate
& ~PSTATE_PAN
) | (value
& PSTATE_PAN
);
4782 static const ARMCPRegInfo pan_reginfo
= {
4783 .name
= "PAN", .state
= ARM_CP_STATE_AA64
,
4784 .opc0
= 3, .opc1
= 0, .crn
= 4, .crm
= 2, .opc2
= 3,
4785 .type
= ARM_CP_NO_RAW
, .access
= PL1_RW
,
4786 .readfn
= aa64_pan_read
, .writefn
= aa64_pan_write
4789 static uint64_t aa64_uao_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4791 return env
->pstate
& PSTATE_UAO
;
4794 static void aa64_uao_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4797 env
->pstate
= (env
->pstate
& ~PSTATE_UAO
) | (value
& PSTATE_UAO
);
4800 static const ARMCPRegInfo uao_reginfo
= {
4801 .name
= "UAO", .state
= ARM_CP_STATE_AA64
,
4802 .opc0
= 3, .opc1
= 0, .crn
= 4, .crm
= 2, .opc2
= 4,
4803 .type
= ARM_CP_NO_RAW
, .access
= PL1_RW
,
4804 .readfn
= aa64_uao_read
, .writefn
= aa64_uao_write
4807 static uint64_t aa64_dit_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4809 return env
->pstate
& PSTATE_DIT
;
4812 static void aa64_dit_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4815 env
->pstate
= (env
->pstate
& ~PSTATE_DIT
) | (value
& PSTATE_DIT
);
4818 static const ARMCPRegInfo dit_reginfo
= {
4819 .name
= "DIT", .state
= ARM_CP_STATE_AA64
,
4820 .opc0
= 3, .opc1
= 3, .crn
= 4, .crm
= 2, .opc2
= 5,
4821 .type
= ARM_CP_NO_RAW
, .access
= PL0_RW
,
4822 .readfn
= aa64_dit_read
, .writefn
= aa64_dit_write
4825 static uint64_t aa64_ssbs_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
4827 return env
->pstate
& PSTATE_SSBS
;
4830 static void aa64_ssbs_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4833 env
->pstate
= (env
->pstate
& ~PSTATE_SSBS
) | (value
& PSTATE_SSBS
);
4836 static const ARMCPRegInfo ssbs_reginfo
= {
4837 .name
= "SSBS", .state
= ARM_CP_STATE_AA64
,
4838 .opc0
= 3, .opc1
= 3, .crn
= 4, .crm
= 2, .opc2
= 6,
4839 .type
= ARM_CP_NO_RAW
, .access
= PL0_RW
,
4840 .readfn
= aa64_ssbs_read
, .writefn
= aa64_ssbs_write
4843 static CPAccessResult
aa64_cacheop_poc_access(CPUARMState
*env
,
4844 const ARMCPRegInfo
*ri
,
4847 /* Cache invalidate/clean to Point of Coherency or Persistence... */
4848 switch (arm_current_el(env
)) {
4850 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
4851 if (!(arm_sctlr(env
, 0) & SCTLR_UCI
)) {
4852 return CP_ACCESS_TRAP
;
4856 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
4857 if (arm_hcr_el2_eff(env
) & HCR_TPCP
) {
4858 return CP_ACCESS_TRAP_EL2
;
4862 return CP_ACCESS_OK
;
4865 static CPAccessResult
do_cacheop_pou_access(CPUARMState
*env
, uint64_t hcrflags
)
4867 /* Cache invalidate/clean to Point of Unification... */
4868 switch (arm_current_el(env
)) {
4870 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
4871 if (!(arm_sctlr(env
, 0) & SCTLR_UCI
)) {
4872 return CP_ACCESS_TRAP
;
4876 /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set. */
4877 if (arm_hcr_el2_eff(env
) & hcrflags
) {
4878 return CP_ACCESS_TRAP_EL2
;
4882 return CP_ACCESS_OK
;
4885 static CPAccessResult
access_ticab(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4888 return do_cacheop_pou_access(env
, HCR_TICAB
| HCR_TPU
);
4891 static CPAccessResult
access_tocu(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4894 return do_cacheop_pou_access(env
, HCR_TOCU
| HCR_TPU
);
4898 * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4899 * Page D4-1736 (DDI0487A.b)
4902 static int vae1_tlbmask(CPUARMState
*env
)
4904 uint64_t hcr
= arm_hcr_el2_eff(env
);
4907 if ((hcr
& (HCR_E2H
| HCR_TGE
)) == (HCR_E2H
| HCR_TGE
)) {
4908 mask
= ARMMMUIdxBit_E20_2
|
4909 ARMMMUIdxBit_E20_2_PAN
|
4912 mask
= ARMMMUIdxBit_E10_1
|
4913 ARMMMUIdxBit_E10_1_PAN
|
4919 static int vae2_tlbmask(CPUARMState
*env
)
4921 uint64_t hcr
= arm_hcr_el2_eff(env
);
4924 if (hcr
& HCR_E2H
) {
4925 mask
= ARMMMUIdxBit_E20_2
|
4926 ARMMMUIdxBit_E20_2_PAN
|
4929 mask
= ARMMMUIdxBit_E2
;
4934 /* Return 56 if TBI is enabled, 64 otherwise. */
4935 static int tlbbits_for_regime(CPUARMState
*env
, ARMMMUIdx mmu_idx
,
4938 uint64_t tcr
= regime_tcr(env
, mmu_idx
);
4939 int tbi
= aa64_va_parameter_tbi(tcr
, mmu_idx
);
4940 int select
= extract64(addr
, 55, 1);
4942 return (tbi
>> select
) & 1 ? 56 : 64;
4945 static int vae1_tlbbits(CPUARMState
*env
, uint64_t addr
)
4947 uint64_t hcr
= arm_hcr_el2_eff(env
);
4950 /* Only the regime of the mmu_idx below is significant. */
4951 if ((hcr
& (HCR_E2H
| HCR_TGE
)) == (HCR_E2H
| HCR_TGE
)) {
4952 mmu_idx
= ARMMMUIdx_E20_0
;
4954 mmu_idx
= ARMMMUIdx_E10_0
;
4957 return tlbbits_for_regime(env
, mmu_idx
, addr
);
4960 static int vae2_tlbbits(CPUARMState
*env
, uint64_t addr
)
4962 uint64_t hcr
= arm_hcr_el2_eff(env
);
4966 * Only the regime of the mmu_idx below is significant.
4967 * Regime EL2&0 has two ranges with separate TBI configuration, while EL2
4970 if (hcr
& HCR_E2H
) {
4971 mmu_idx
= ARMMMUIdx_E20_2
;
4973 mmu_idx
= ARMMMUIdx_E2
;
4976 return tlbbits_for_regime(env
, mmu_idx
, addr
);
4979 static void tlbi_aa64_vmalle1is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4982 CPUState
*cs
= env_cpu(env
);
4983 int mask
= vae1_tlbmask(env
);
4985 tlb_flush_by_mmuidx_all_cpus_synced(cs
, mask
);
4988 static void tlbi_aa64_vmalle1_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
4991 CPUState
*cs
= env_cpu(env
);
4992 int mask
= vae1_tlbmask(env
);
4994 if (tlb_force_broadcast(env
)) {
4995 tlb_flush_by_mmuidx_all_cpus_synced(cs
, mask
);
4997 tlb_flush_by_mmuidx(cs
, mask
);
5001 static int e2_tlbmask(CPUARMState
*env
)
5003 return (ARMMMUIdxBit_E20_0
|
5004 ARMMMUIdxBit_E20_2
|
5005 ARMMMUIdxBit_E20_2_PAN
|
5009 static void tlbi_aa64_alle1_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5012 CPUState
*cs
= env_cpu(env
);
5013 int mask
= alle1_tlbmask(env
);
5015 tlb_flush_by_mmuidx(cs
, mask
);
5018 static void tlbi_aa64_alle2_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5021 CPUState
*cs
= env_cpu(env
);
5022 int mask
= e2_tlbmask(env
);
5024 tlb_flush_by_mmuidx(cs
, mask
);
5027 static void tlbi_aa64_alle3_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5030 ARMCPU
*cpu
= env_archcpu(env
);
5031 CPUState
*cs
= CPU(cpu
);
5033 tlb_flush_by_mmuidx(cs
, ARMMMUIdxBit_E3
);
5036 static void tlbi_aa64_alle1is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5039 CPUState
*cs
= env_cpu(env
);
5040 int mask
= alle1_tlbmask(env
);
5042 tlb_flush_by_mmuidx_all_cpus_synced(cs
, mask
);
5045 static void tlbi_aa64_alle2is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5048 CPUState
*cs
= env_cpu(env
);
5049 int mask
= e2_tlbmask(env
);
5051 tlb_flush_by_mmuidx_all_cpus_synced(cs
, mask
);
5054 static void tlbi_aa64_alle3is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5057 CPUState
*cs
= env_cpu(env
);
5059 tlb_flush_by_mmuidx_all_cpus_synced(cs
, ARMMMUIdxBit_E3
);
5062 static void tlbi_aa64_vae2_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5066 * Invalidate by VA, EL2
5067 * Currently handles both VAE2 and VALE2, since we don't support
5068 * flush-last-level-only.
5070 CPUState
*cs
= env_cpu(env
);
5071 int mask
= vae2_tlbmask(env
);
5072 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
5073 int bits
= vae2_tlbbits(env
, pageaddr
);
5075 tlb_flush_page_bits_by_mmuidx(cs
, pageaddr
, mask
, bits
);
5078 static void tlbi_aa64_vae3_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5082 * Invalidate by VA, EL3
5083 * Currently handles both VAE3 and VALE3, since we don't support
5084 * flush-last-level-only.
5086 ARMCPU
*cpu
= env_archcpu(env
);
5087 CPUState
*cs
= CPU(cpu
);
5088 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
5090 tlb_flush_page_by_mmuidx(cs
, pageaddr
, ARMMMUIdxBit_E3
);
5093 static void tlbi_aa64_vae1is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5096 CPUState
*cs
= env_cpu(env
);
5097 int mask
= vae1_tlbmask(env
);
5098 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
5099 int bits
= vae1_tlbbits(env
, pageaddr
);
5101 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs
, pageaddr
, mask
, bits
);
5104 static void tlbi_aa64_vae1_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5108 * Invalidate by VA, EL1&0 (AArch64 version).
5109 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
5110 * since we don't support flush-for-specific-ASID-only or
5111 * flush-last-level-only.
5113 CPUState
*cs
= env_cpu(env
);
5114 int mask
= vae1_tlbmask(env
);
5115 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
5116 int bits
= vae1_tlbbits(env
, pageaddr
);
5118 if (tlb_force_broadcast(env
)) {
5119 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs
, pageaddr
, mask
, bits
);
5121 tlb_flush_page_bits_by_mmuidx(cs
, pageaddr
, mask
, bits
);
5125 static void tlbi_aa64_vae2is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5128 CPUState
*cs
= env_cpu(env
);
5129 int mask
= vae2_tlbmask(env
);
5130 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
5131 int bits
= vae2_tlbbits(env
, pageaddr
);
5133 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs
, pageaddr
, mask
, bits
);
5136 static void tlbi_aa64_vae3is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5139 CPUState
*cs
= env_cpu(env
);
5140 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
5141 int bits
= tlbbits_for_regime(env
, ARMMMUIdx_E3
, pageaddr
);
5143 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs
, pageaddr
,
5144 ARMMMUIdxBit_E3
, bits
);
5147 static int ipas2e1_tlbmask(CPUARMState
*env
, int64_t value
)
5150 * The MSB of value is the NS field, which only applies if SEL2
5151 * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
5154 && cpu_isar_feature(aa64_sel2
, env_archcpu(env
))
5155 && arm_is_secure_below_el3(env
)
5156 ? ARMMMUIdxBit_Stage2_S
5157 : ARMMMUIdxBit_Stage2
);
5160 static void tlbi_aa64_ipas2e1_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5163 CPUState
*cs
= env_cpu(env
);
5164 int mask
= ipas2e1_tlbmask(env
, value
);
5165 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
5167 if (tlb_force_broadcast(env
)) {
5168 tlb_flush_page_by_mmuidx_all_cpus_synced(cs
, pageaddr
, mask
);
5170 tlb_flush_page_by_mmuidx(cs
, pageaddr
, mask
);
5174 static void tlbi_aa64_ipas2e1is_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5177 CPUState
*cs
= env_cpu(env
);
5178 int mask
= ipas2e1_tlbmask(env
, value
);
5179 uint64_t pageaddr
= sextract64(value
<< 12, 0, 56);
5181 tlb_flush_page_by_mmuidx_all_cpus_synced(cs
, pageaddr
, mask
);
5184 #ifdef TARGET_AARCH64
5190 static ARMGranuleSize
tlbi_range_tg_to_gran_size(int tg
)
5193 * Note that the TLBI range TG field encoding differs from both
5194 * TG0 and TG1 encodings.
5208 static TLBIRange
tlbi_aa64_get_range(CPUARMState
*env
, ARMMMUIdx mmuidx
,
5211 unsigned int page_size_granule
, page_shift
, num
, scale
, exponent
;
5212 /* Extract one bit to represent the va selector in use. */
5213 uint64_t select
= sextract64(value
, 36, 1);
5214 ARMVAParameters param
= aa64_va_parameters(env
, select
, mmuidx
, true, false);
5215 TLBIRange ret
= { };
5216 ARMGranuleSize gran
;
5218 page_size_granule
= extract64(value
, 46, 2);
5219 gran
= tlbi_range_tg_to_gran_size(page_size_granule
);
5221 /* The granule encoded in value must match the granule in use. */
5222 if (gran
!= param
.gran
) {
5223 qemu_log_mask(LOG_GUEST_ERROR
, "Invalid tlbi page size granule %d\n",
5228 page_shift
= arm_granule_bits(gran
);
5229 num
= extract64(value
, 39, 5);
5230 scale
= extract64(value
, 44, 2);
5231 exponent
= (5 * scale
) + 1;
5233 ret
.length
= (num
+ 1) << (exponent
+ page_shift
);
5236 ret
.base
= sextract64(value
, 0, 37);
5238 ret
.base
= extract64(value
, 0, 37);
5242 * With DS=1, BaseADDR is always shifted 16 so that it is able
5243 * to address all 52 va bits. The input address is perforce
5244 * aligned on a 64k boundary regardless of translation granule.
5248 ret
.base
<<= page_shift
;
5253 static void do_rvae_write(CPUARMState
*env
, uint64_t value
,
5254 int idxmap
, bool synced
)
5256 ARMMMUIdx one_idx
= ARM_MMU_IDX_A
| ctz32(idxmap
);
5260 range
= tlbi_aa64_get_range(env
, one_idx
, value
);
5261 bits
= tlbbits_for_regime(env
, one_idx
, range
.base
);
5264 tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env
),
5270 tlb_flush_range_by_mmuidx(env_cpu(env
), range
.base
,
5271 range
.length
, idxmap
, bits
);
5275 static void tlbi_aa64_rvae1_write(CPUARMState
*env
,
5276 const ARMCPRegInfo
*ri
,
5280 * Invalidate by VA range, EL1&0.
5281 * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
5282 * since we don't support flush-for-specific-ASID-only or
5283 * flush-last-level-only.
5286 do_rvae_write(env
, value
, vae1_tlbmask(env
),
5287 tlb_force_broadcast(env
));
5290 static void tlbi_aa64_rvae1is_write(CPUARMState
*env
,
5291 const ARMCPRegInfo
*ri
,
5295 * Invalidate by VA range, Inner/Outer Shareable EL1&0.
5296 * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
5297 * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
5298 * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
5299 * shareable specific flushes.
5302 do_rvae_write(env
, value
, vae1_tlbmask(env
), true);
5305 static void tlbi_aa64_rvae2_write(CPUARMState
*env
,
5306 const ARMCPRegInfo
*ri
,
5310 * Invalidate by VA range, EL2.
5311 * Currently handles all of RVAE2 and RVALE2,
5312 * since we don't support flush-for-specific-ASID-only or
5313 * flush-last-level-only.
5316 do_rvae_write(env
, value
, vae2_tlbmask(env
),
5317 tlb_force_broadcast(env
));
5322 static void tlbi_aa64_rvae2is_write(CPUARMState
*env
,
5323 const ARMCPRegInfo
*ri
,
5327 * Invalidate by VA range, Inner/Outer Shareable, EL2.
5328 * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
5329 * since we don't support flush-for-specific-ASID-only,
5330 * flush-last-level-only or inner/outer shareable specific flushes.
5333 do_rvae_write(env
, value
, vae2_tlbmask(env
), true);
5337 static void tlbi_aa64_rvae3_write(CPUARMState
*env
,
5338 const ARMCPRegInfo
*ri
,
5342 * Invalidate by VA range, EL3.
5343 * Currently handles all of RVAE3 and RVALE3,
5344 * since we don't support flush-for-specific-ASID-only or
5345 * flush-last-level-only.
5348 do_rvae_write(env
, value
, ARMMMUIdxBit_E3
, tlb_force_broadcast(env
));
5351 static void tlbi_aa64_rvae3is_write(CPUARMState
*env
,
5352 const ARMCPRegInfo
*ri
,
5356 * Invalidate by VA range, EL3, Inner/Outer Shareable.
5357 * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
5358 * since we don't support flush-for-specific-ASID-only,
5359 * flush-last-level-only or inner/outer specific flushes.
5362 do_rvae_write(env
, value
, ARMMMUIdxBit_E3
, true);
5365 static void tlbi_aa64_ripas2e1_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5368 do_rvae_write(env
, value
, ipas2e1_tlbmask(env
, value
),
5369 tlb_force_broadcast(env
));
5372 static void tlbi_aa64_ripas2e1is_write(CPUARMState
*env
,
5373 const ARMCPRegInfo
*ri
,
5376 do_rvae_write(env
, value
, ipas2e1_tlbmask(env
, value
), true);
5380 static CPAccessResult
aa64_zva_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5383 int cur_el
= arm_current_el(env
);
5386 uint64_t hcr
= arm_hcr_el2_eff(env
);
5389 if ((hcr
& (HCR_E2H
| HCR_TGE
)) == (HCR_E2H
| HCR_TGE
)) {
5390 if (!(env
->cp15
.sctlr_el
[2] & SCTLR_DZE
)) {
5391 return CP_ACCESS_TRAP_EL2
;
5394 if (!(env
->cp15
.sctlr_el
[1] & SCTLR_DZE
)) {
5395 return CP_ACCESS_TRAP
;
5397 if (hcr
& HCR_TDZ
) {
5398 return CP_ACCESS_TRAP_EL2
;
5401 } else if (hcr
& HCR_TDZ
) {
5402 return CP_ACCESS_TRAP_EL2
;
5405 return CP_ACCESS_OK
;
5408 static uint64_t aa64_dczid_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
5410 ARMCPU
*cpu
= env_archcpu(env
);
5411 int dzp_bit
= 1 << 4;
5413 /* DZP indicates whether DC ZVA access is allowed */
5414 if (aa64_zva_access(env
, NULL
, false) == CP_ACCESS_OK
) {
5417 return cpu
->dcz_blocksize
| dzp_bit
;
5420 static CPAccessResult
sp_el0_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5423 if (!(env
->pstate
& PSTATE_SP
)) {
5425 * Access to SP_EL0 is undefined if it's being used as
5426 * the stack pointer.
5428 return CP_ACCESS_TRAP_UNCATEGORIZED
;
5430 return CP_ACCESS_OK
;
5433 static uint64_t spsel_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
5435 return env
->pstate
& PSTATE_SP
;
5438 static void spsel_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t val
)
5440 update_spsel(env
, val
);
5443 static void sctlr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5446 ARMCPU
*cpu
= env_archcpu(env
);
5448 if (arm_feature(env
, ARM_FEATURE_PMSA
) && !cpu
->has_mpu
) {
5449 /* M bit is RAZ/WI for PMSA with no MPU implemented */
5453 /* ??? Lots of these bits are not implemented. */
5455 if (ri
->state
== ARM_CP_STATE_AA64
&& !cpu_isar_feature(aa64_mte
, cpu
)) {
5456 if (ri
->opc1
== 6) { /* SCTLR_EL3 */
5457 value
&= ~(SCTLR_ITFSB
| SCTLR_TCF
| SCTLR_ATA
);
5459 value
&= ~(SCTLR_ITFSB
| SCTLR_TCF0
| SCTLR_TCF
|
5460 SCTLR_ATA0
| SCTLR_ATA
);
5464 if (raw_read(env
, ri
) == value
) {
5466 * Skip the TLB flush if nothing actually changed; Linux likes
5467 * to do a lot of pointless SCTLR writes.
5472 raw_write(env
, ri
, value
);
5474 /* This may enable/disable the MMU, so do a TLB flush. */
5475 tlb_flush(CPU(cpu
));
5477 if (tcg_enabled() && ri
->type
& ARM_CP_SUPPRESS_TB_END
) {
5479 * Normally we would always end the TB on an SCTLR write; see the
5480 * comment in ARMCPRegInfo sctlr initialization below for why Xscale
5481 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
5482 * of hflags from the translator, so do it here.
5484 arm_rebuild_hflags(env
);
5488 static void mdcr_el3_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5492 * Some MDCR_EL3 bits affect whether PMU counters are running:
5493 * if we are trying to change any of those then we must
5494 * bracket this update with PMU start/finish calls.
5496 bool pmu_op
= (env
->cp15
.mdcr_el3
^ value
) & MDCR_EL3_PMU_ENABLE_BITS
;
5501 env
->cp15
.mdcr_el3
= value
;
5507 static void sdcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5510 /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
5511 mdcr_el3_write(env
, ri
, value
& SDCR_VALID_MASK
);
5514 static void mdcr_el2_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5518 * Some MDCR_EL2 bits affect whether PMU counters are running:
5519 * if we are trying to change any of those then we must
5520 * bracket this update with PMU start/finish calls.
5522 bool pmu_op
= (env
->cp15
.mdcr_el2
^ value
) & MDCR_EL2_PMU_ENABLE_BITS
;
5527 env
->cp15
.mdcr_el2
= value
;
5533 static CPAccessResult
access_nv1(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5536 if (arm_current_el(env
) == 1) {
5537 uint64_t hcr_nv
= arm_hcr_el2_eff(env
) & (HCR_NV
| HCR_NV1
| HCR_NV2
);
5539 if (hcr_nv
== (HCR_NV
| HCR_NV1
)) {
5540 return CP_ACCESS_TRAP_EL2
;
5543 return CP_ACCESS_OK
;
5546 #ifdef CONFIG_USER_ONLY
5548 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
5549 * code to get around W^X restrictions, where one region is writable and the
5550 * other is executable.
5552 * Since the executable region is never written to we cannot detect code
5553 * changes when running in user mode, and rely on the emulated JIT telling us
5554 * that the code has changed by executing this instruction.
5556 static void ic_ivau_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
5559 uint64_t icache_line_mask
, start_address
, end_address
;
5562 cpu
= env_archcpu(env
);
5564 icache_line_mask
= (4 << extract32(cpu
->ctr
, 0, 4)) - 1;
5565 start_address
= value
& ~icache_line_mask
;
5566 end_address
= value
| icache_line_mask
;
5570 tb_invalidate_phys_range(start_address
, end_address
);
5576 static const ARMCPRegInfo v8_cp_reginfo
[] = {
5578 * Minimal set of EL0-visible registers. This will need to be expanded
5579 * significantly for system emulation of AArch64 CPUs.
5581 { .name
= "NZCV", .state
= ARM_CP_STATE_AA64
,
5582 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 2,
5583 .access
= PL0_RW
, .type
= ARM_CP_NZCV
},
5584 { .name
= "DAIF", .state
= ARM_CP_STATE_AA64
,
5585 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 2,
5586 .type
= ARM_CP_NO_RAW
,
5587 .access
= PL0_RW
, .accessfn
= aa64_daif_access
,
5588 .fieldoffset
= offsetof(CPUARMState
, daif
),
5589 .writefn
= aa64_daif_write
, .resetfn
= arm_cp_reset_ignore
},
5590 { .name
= "FPCR", .state
= ARM_CP_STATE_AA64
,
5591 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 4,
5592 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
5593 .readfn
= aa64_fpcr_read
, .writefn
= aa64_fpcr_write
},
5594 { .name
= "FPSR", .state
= ARM_CP_STATE_AA64
,
5595 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 4,
5596 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
5597 .readfn
= aa64_fpsr_read
, .writefn
= aa64_fpsr_write
},
5598 { .name
= "DCZID_EL0", .state
= ARM_CP_STATE_AA64
,
5599 .opc0
= 3, .opc1
= 3, .opc2
= 7, .crn
= 0, .crm
= 0,
5600 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
,
5601 .fgt
= FGT_DCZID_EL0
,
5602 .readfn
= aa64_dczid_read
},
5603 { .name
= "DC_ZVA", .state
= ARM_CP_STATE_AA64
,
5604 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 4, .opc2
= 1,
5605 .access
= PL0_W
, .type
= ARM_CP_DC_ZVA
,
5606 #ifndef CONFIG_USER_ONLY
5607 /* Avoid overhead of an access check that always passes in user-mode */
5608 .accessfn
= aa64_zva_access
,
5612 { .name
= "CURRENTEL", .state
= ARM_CP_STATE_AA64
,
5613 .opc0
= 3, .opc1
= 0, .opc2
= 2, .crn
= 4, .crm
= 2,
5614 .access
= PL1_R
, .type
= ARM_CP_CURRENTEL
},
5616 * Instruction cache ops. All of these except `IC IVAU` NOP because we
5617 * don't emulate caches.
5619 { .name
= "IC_IALLUIS", .state
= ARM_CP_STATE_AA64
,
5620 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 1, .opc2
= 0,
5621 .access
= PL1_W
, .type
= ARM_CP_NOP
,
5622 .fgt
= FGT_ICIALLUIS
,
5623 .accessfn
= access_ticab
},
5624 { .name
= "IC_IALLU", .state
= ARM_CP_STATE_AA64
,
5625 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 5, .opc2
= 0,
5626 .access
= PL1_W
, .type
= ARM_CP_NOP
,
5628 .accessfn
= access_tocu
},
5629 { .name
= "IC_IVAU", .state
= ARM_CP_STATE_AA64
,
5630 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 5, .opc2
= 1,
5633 .accessfn
= access_tocu
,
5634 #ifdef CONFIG_USER_ONLY
5635 .type
= ARM_CP_NO_RAW
,
5636 .writefn
= ic_ivau_write
5641 /* Cache ops: all NOPs since we don't emulate caches */
5642 { .name
= "DC_IVAC", .state
= ARM_CP_STATE_AA64
,
5643 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 1,
5644 .access
= PL1_W
, .accessfn
= aa64_cacheop_poc_access
,
5646 .type
= ARM_CP_NOP
},
5647 { .name
= "DC_ISW", .state
= ARM_CP_STATE_AA64
,
5648 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 2,
5650 .access
= PL1_W
, .accessfn
= access_tsw
, .type
= ARM_CP_NOP
},
5651 { .name
= "DC_CVAC", .state
= ARM_CP_STATE_AA64
,
5652 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 10, .opc2
= 1,
5653 .access
= PL0_W
, .type
= ARM_CP_NOP
,
5655 .accessfn
= aa64_cacheop_poc_access
},
5656 { .name
= "DC_CSW", .state
= ARM_CP_STATE_AA64
,
5657 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 10, .opc2
= 2,
5659 .access
= PL1_W
, .accessfn
= access_tsw
, .type
= ARM_CP_NOP
},
5660 { .name
= "DC_CVAU", .state
= ARM_CP_STATE_AA64
,
5661 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 11, .opc2
= 1,
5662 .access
= PL0_W
, .type
= ARM_CP_NOP
,
5664 .accessfn
= access_tocu
},
5665 { .name
= "DC_CIVAC", .state
= ARM_CP_STATE_AA64
,
5666 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 14, .opc2
= 1,
5667 .access
= PL0_W
, .type
= ARM_CP_NOP
,
5669 .accessfn
= aa64_cacheop_poc_access
},
5670 { .name
= "DC_CISW", .state
= ARM_CP_STATE_AA64
,
5671 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 14, .opc2
= 2,
5673 .access
= PL1_W
, .accessfn
= access_tsw
, .type
= ARM_CP_NOP
},
5674 /* TLBI operations */
5675 { .name
= "TLBI_VMALLE1IS", .state
= ARM_CP_STATE_AA64
,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVMALLE1IS,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAE1IS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIASIDE1IS,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAAE1IS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVALE1IS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAALE1IS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVMALLE1,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAE1,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIASIDE1,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAAE1,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVALE1,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAALE1,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
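    /*
     * Note the writefn sharing above: TLBI_ASIDE1(IS) reuses the
     * VMALLE1(IS) writefns and the VALE1/VAALE1 forms reuse the plain
     * VAE1 writefns, i.e. the "by ASID" and "last level only" operations
     * are handled as (possibly broader) invalidations of the same scope.
     */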
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_e012_access, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_e012_access, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_e012_access, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .accessfn = at_e012_access, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
      .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_hyp_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2is_hyp_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_hyp_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2is_hyp_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
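    /*
     * All of the cache maintenance operations above are ARM_CP_NOP:
     * nothing in the emulated memory system needs an explicit clean or
     * invalidate, so only the permission/trap checks (access_ticab,
     * access_tocu, access_tsw, aa64_cacheop_poc_access) are modelled here.
     */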
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_nv1,
      .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_nv1,
      .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /*
     * We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .nv2_redirect_offset = 0x240,
      .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW,
      .writefn = mdcr_el3_write,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
};
/* These are present only when EL1 supports AArch32 */
static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
};
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /*
         * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= HCR_TERR | HCR_TEA;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= HCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_fwb, cpu)) {
            valid_mask |= HCR_FWB;
        }
        if (cpu_isar_feature(aa64_rme, cpu)) {
            valid_mask |= HCR_GPF;
        }
        if (cpu_isar_feature(aa64_nv, cpu)) {
            valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
        }
        if (cpu_isar_feature(aa64_nv2, cpu)) {
            valid_mask |= HCR_NV2;
        }
    }

    if (cpu_isar_feature(any_evt, cpu)) {
        valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
    } else if (cpu_isar_feature(any_half_evt, cpu)) {
        valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /*
     * These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * HCR_DCT enables tagging on (disabled) stage1 translation
     * HCR_FWB changes the interpretation of stage2 descriptor bits
     * HCR_NV and HCR_NV1 affect interpretation of descriptor bits
     */
    if ((env->cp15.hcr_el2 ^ value) &
        (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the BQL, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ or VINMI or
     * VFNMI, it is never possible for it to be taken immediately
     * because VIRQ, VFIQ, VINMI and VFNMI are masked unless running
     * at EL0 or EL1, and HCR can only be written at EL2.
     */
    g_assert(bql_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
    arm_cpu_update_vserr(cpu);
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        arm_cpu_update_vinmi(cpu);
        arm_cpu_update_vfnmi(cpu);
    }
}
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}
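/*
 * Worked example for the 32-bit halves above (illustrative values only):
 * with hcr_el2 == 0x0000000100000039, an AArch32 write of 0x2 to HCR2 goes
 * through hcr_writehigh() as
 *   deposit64(0x0000000100000039, 32, 32, 0x2) == 0x0000000200000039
 * i.e. only bits [63:32] are replaced, and the MAKE_64BIT_MASK(0, 32)
 * argument tells do_hcr_write() that the unchanged low half is already
 * known-valid. hcr_writelow() is the mirror image for bits [31:0].
 */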
/*
 * Return the effective value of HCR_EL2, at the given security state.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
{
    uint64_t ret = env->cp15.hcr_el2;

    assert(space != ARMSS_Root);

    if (!arm_is_el2_enabled_secstate(env, space)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}

uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return 0;
    }
    return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
}
6178 bool el_is_in_host(CPUARMState
*env
, int el
)
6183 * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
6184 * Perform the simplest bit tests first, and validate EL2 afterward.
6187 return false; /* EL1 or EL3 */
6191 * Note that hcr_write() checks isar_feature_aa64_vh(),
6192 * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
6194 mask
= el
? HCR_E2H
: HCR_E2H
| HCR_TGE
;
6195 if ((env
->cp15
.hcr_el2
& mask
) != mask
) {
6199 /* TGE and/or E2H set: double check those bits are currently legal. */
6200 return arm_is_el2_enabled(env
) && arm_el_is_aa64(env
, 2);
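/*
 * Example: el_is_in_host(env, 0) is true only when HCR_EL2.E2H and
 * HCR_EL2.TGE are both set, EL2 is enabled in the current security state
 * and EL2 is AArch64; el_is_in_host(env, 2) only requires E2H. Odd ELs
 * (1 and 3) can never be "in host" and return false immediately.
 */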
static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t valid_mask = 0;

    /* FEAT_MOPS adds MSCEn and MCE2 */
    if (cpu_isar_feature(aa64_mops, cpu)) {
        valid_mask |= HCRX_MSCEN | HCRX_MCE2;
    }

    /* FEAT_NMI adds TALLINT, VINMI and VFNMI */
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
    }

    /* Clear RES0 bits.  */
    env->cp15.hcrx_el2 = value & valid_mask;

    /*
     * Updates to VINMI and VFNMI require us to update the status of
     * virtual NMI, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the BQL, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCRX pends a VINMI or VFNMI it is never
     * possible for it to be taken immediately, because VINMI and
     * VFNMI are masked unless running at EL0 or EL1, and HCRX
     * can only be written at EL2.
     */
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        g_assert(bql_locked());
        arm_cpu_update_vinmi(cpu);
        arm_cpu_update_vfnmi(cpu);
    }
}
static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 2
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_HXEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo hcrx_el2_reginfo = {
    .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
    .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
    .nv2_redirect_offset = 0xa0,
    .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
};
/* Return the effective value of HCRX_EL2.  */
uint64_t arm_hcrx_el2_eff(CPUARMState *env)
{
    /*
     * The bits in this register behave as 0 for all purposes other than
     * direct reads of the register if SCR_EL3.HXEn is 0.
     * If EL2 is not enabled in the current security state, then the
     * bit may behave as if 0, or as if 1, depending on the bit.
     * For the moment, we treat the EL2-disabled case as taking
     * priority over the HXEn-disabled case. This is true for the only
     * bit for a feature which we implement where the answer is different
     * for the two cases (MSCEn for FEAT_MOPS).
     * This may need to be revisited for future bits.
     */
    if (!arm_is_el2_enabled(env)) {
        if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
            /* MSCEn behaves as 1 if EL2 is not enabled */
            return HCRX_MSCEN;
        }
        return 0;
    }
    if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
        return 0;
    }
    return env->cp15.hcrx_el2;
}
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
        value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
    }
    return value;
}
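/*
 * Net effect of the pair above: when an AArch32 EL3 exists and NSACR.CP10
 * is clear, the Non-secure view of HCPTR behaves as if TCP10 and TCP11
 * were RAO/WI -- writes cannot clear them and reads always see them set --
 * regardless of what is actually stored in cptr_el[2].
 */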
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .nv2_redirect_offset = 0x78,
      .writefn = hcr_write, .raw_writefn = raw_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      .nv2_redirect_offset = 0x40,
      /* no .writefn needed as this can't cause an ASID change */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write, .raw_writefn = raw_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
      .nv2_redirect_offset = 0x20,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .nv2_redirect_offset = 0x90,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /*
     * Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    /*
     * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /*
       * ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
      .writefn = gt_cnthctl_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .nv2_redirect_offset = 0x60,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .nv2_redirect_offset = 0x80,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
};
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
};

static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = sel2_access,
      .nv2_redirect_offset = 0x30,
      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
      .access = PL2_RW, .accessfn = sel2_access,
      .nv2_redirect_offset = 0x48,
      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3 or EL2.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write, .raw_writefn = raw_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change */
      .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1) {
        /* This must be a FEAT_NV access */
        return CP_ACCESS_OK;
    }
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        /* This must be a FEAT_NV access with NVx == 101 */
        if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return e2h_access(env, ri, isread);
}

static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        /* This must be a FEAT_NV access with NVx == 101 */
        if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return e2h_access(env, ri, isread);
}
6761 static bool redirect_for_e2h(CPUARMState
*env
)
6763 return arm_current_el(env
) == 2 && (arm_hcr_el2_eff(env
) & HCR_E2H
);
6766 static uint64_t el2_e2h_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
6770 if (redirect_for_e2h(env
)) {
6771 /* Switch to the saved EL2 version of the register. */
6773 readfn
= ri
->readfn
;
6775 readfn
= ri
->orig_readfn
;
6777 if (readfn
== NULL
) {
6780 return readfn(env
, ri
);
6783 static void el2_e2h_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6788 if (redirect_for_e2h(env
)) {
6789 /* Switch to the saved EL2 version of the register. */
6791 writefn
= ri
->writefn
;
6793 writefn
= ri
->orig_writefn
;
6795 if (writefn
== NULL
) {
6796 writefn
= raw_write
;
6798 writefn(env
, ri
, value
);
6801 static uint64_t el2_e2h_e12_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
6803 /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
6804 return ri
->orig_readfn(env
, ri
->opaque
);
6807 static void el2_e2h_e12_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
6810 /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
6811 return ri
->orig_writefn(env
, ri
->opaque
, value
);
6814 static CPAccessResult
el2_e2h_e12_access(CPUARMState
*env
,
6815 const ARMCPRegInfo
*ri
,
6818 if (arm_current_el(env
) == 1) {
6820 * This must be a FEAT_NV access (will either trap or redirect
6821 * to memory). None of the registers with _EL12 aliases want to
6822 * apply their trap controls for this kind of access, so don't
6823 * call the orig_accessfn or do the "UNDEF when E2H is 0" check.
6825 return CP_ACCESS_OK
;
6827 /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
6828 if (!(arm_hcr_el2_eff(env
) & HCR_E2H
)) {
6829 return CP_ACCESS_TRAP_UNCATEGORIZED
;
6831 if (ri
->orig_accessfn
) {
6832 return ri
->orig_accessfn(env
, ri
->opaque
, isread
);
6834 return CP_ACCESS_OK
;
6837 static void define_arm_vh_e2h_redirects_aliases(ARMCPU
*cpu
)
6840 uint32_t src_key
, dst_key
, new_key
;
6841 const char *src_name
, *dst_name
, *new_name
;
6842 bool (*feature
)(const ARMISARegisters
*id
);
6845 #define K(op0, op1, crn, crm, op2) \
6846 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
6848 static const struct E2HAlias aliases
[] = {
6849 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
6850 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
6851 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
6852 "CPACR", "CPTR_EL2", "CPACR_EL12" },
6853 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
6854 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
6855 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
6856 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
6857 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
6858 "TCR_EL1", "TCR_EL2", "TCR_EL12" },
6859 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
6860 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
6861 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
6862 "ELR_EL1", "ELR_EL2", "ELR_EL12" },
6863 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
6864 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
6865 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
6866 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
6867 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
6868 "ESR_EL1", "ESR_EL2", "ESR_EL12" },
6869 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
6870 "FAR_EL1", "FAR_EL2", "FAR_EL12" },
6871 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
6872 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
6873 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
6874 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
6875 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
6876 "VBAR", "VBAR_EL2", "VBAR_EL12" },
6877 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
6878 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
6879 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
6880 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
6883 * Note that redirection of ZCR is mentioned in the description
6884 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
6885 * not in the summary table.
6887 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
6888 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve
},
6889 { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6),
6890 "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme
},
6892 { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
6893 "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte
},
6895 { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
6896 "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
6897 isar_feature_aa64_scxtnum
},
6899 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
6900 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
6906 for (i
= 0; i
< ARRAY_SIZE(aliases
); i
++) {
6907 const struct E2HAlias
*a
= &aliases
[i
];
6908 ARMCPRegInfo
*src_reg
, *dst_reg
, *new_reg
;
6911 if (a
->feature
&& !a
->feature(&cpu
->isar
)) {
6915 src_reg
= g_hash_table_lookup(cpu
->cp_regs
,
6916 (gpointer
)(uintptr_t)a
->src_key
);
6917 dst_reg
= g_hash_table_lookup(cpu
->cp_regs
,
6918 (gpointer
)(uintptr_t)a
->dst_key
);
6919 g_assert(src_reg
!= NULL
);
6920 g_assert(dst_reg
!= NULL
);
6922 /* Cross-compare names to detect typos in the keys. */
6923 g_assert(strcmp(src_reg
->name
, a
->src_name
) == 0);
6924 g_assert(strcmp(dst_reg
->name
, a
->dst_name
) == 0);
6926 /* None of the core system registers use opaque; we will. */
6927 g_assert(src_reg
->opaque
== NULL
);
6929 /* Create alias before redirection so we dup the right data. */
6930 new_reg
= g_memdup(src_reg
, sizeof(ARMCPRegInfo
));
6932 new_reg
->name
= a
->new_name
;
6933 new_reg
->type
|= ARM_CP_ALIAS
;
6934 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
6935 new_reg
->access
&= PL2_RW
| PL3_RW
;
6936 /* The new_reg op fields are as per new_key, not the target reg */
6937 new_reg
->crn
= (a
->new_key
& CP_REG_ARM64_SYSREG_CRN_MASK
)
6938 >> CP_REG_ARM64_SYSREG_CRN_SHIFT
;
6939 new_reg
->crm
= (a
->new_key
& CP_REG_ARM64_SYSREG_CRM_MASK
)
6940 >> CP_REG_ARM64_SYSREG_CRM_SHIFT
;
6941 new_reg
->opc0
= (a
->new_key
& CP_REG_ARM64_SYSREG_OP0_MASK
)
6942 >> CP_REG_ARM64_SYSREG_OP0_SHIFT
;
6943 new_reg
->opc1
= (a
->new_key
& CP_REG_ARM64_SYSREG_OP1_MASK
)
6944 >> CP_REG_ARM64_SYSREG_OP1_SHIFT
;
6945 new_reg
->opc2
= (a
->new_key
& CP_REG_ARM64_SYSREG_OP2_MASK
)
6946 >> CP_REG_ARM64_SYSREG_OP2_SHIFT
;
6947 new_reg
->opaque
= src_reg
;
6948 new_reg
->orig_readfn
= src_reg
->readfn
?: raw_read
;
6949 new_reg
->orig_writefn
= src_reg
->writefn
?: raw_write
;
6950 new_reg
->orig_accessfn
= src_reg
->accessfn
;
6951 if (!new_reg
->raw_readfn
) {
6952 new_reg
->raw_readfn
= raw_read
;
6954 if (!new_reg
->raw_writefn
) {
6955 new_reg
->raw_writefn
= raw_write
;
6957 new_reg
->readfn
= el2_e2h_e12_read
;
6958 new_reg
->writefn
= el2_e2h_e12_write
;
6959 new_reg
->accessfn
= el2_e2h_e12_access
;
6962 * If the _EL1 register is redirected to memory by FEAT_NV2,
6963 * then it shares the offset with the _EL12 register,
6964 * and which one is redirected depends on HCR_EL2.NV1.
6966 if (new_reg
->nv2_redirect_offset
) {
6967 assert(new_reg
->nv2_redirect_offset
& NV2_REDIR_NV1
);
6968 new_reg
->nv2_redirect_offset
&= ~NV2_REDIR_NV1
;
6969 new_reg
->nv2_redirect_offset
|= NV2_REDIR_NO_NV1
;
6972 ok
= g_hash_table_insert(cpu
->cp_regs
,
6973 (gpointer
)(uintptr_t)a
->new_key
, new_reg
);
6976 src_reg
->opaque
= dst_reg
;
6977 src_reg
->orig_readfn
= src_reg
->readfn
?: raw_read
;
6978 src_reg
->orig_writefn
= src_reg
->writefn
?: raw_write
;
6979 if (!src_reg
->raw_readfn
) {
6980 src_reg
->raw_readfn
= raw_read
;
6982 if (!src_reg
->raw_writefn
) {
6983 src_reg
->raw_writefn
= raw_write
;
6985 src_reg
->readfn
= el2_e2h_read
;
6986 src_reg
->writefn
= el2_e2h_write
;
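/*
 * Example of the K() encoding used in the alias table (illustrative):
 * K(3, 5, 1, 0, 0) is the key for SCTLR_EL12, i.e. op0=3, op1=5, crn=1,
 * crm=0, op2=0. define_arm_vh_e2h_redirects_aliases() unpacks exactly
 * those fields from new_key into the duplicated reginfo, so the _EL12
 * alias decodes at op1=5 but its accessors always forward to the original
 * _EL1 register state, while the _EL1 register itself is redirected to the
 * _EL2 state when accessed at EL2 with HCR_EL2.E2H set.
 */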
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TID2) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TID2) {
            return CP_ACCESS_TRAP_EL2;
        }
    }

    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
/*
 * Check for traps to RAS registers, which are controlled
 * by HCR_EL2.TERR and SCR_EL3.TERR.
 */
static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
        return env->cp15.vdisr_el2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
        return 0; /* RAZ/WI */
    }
    return env->cp15.disr_el1;
}

static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
        env->cp15.vdisr_el2 = val;
        return;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
        return; /* RAZ/WI */
    }
    env->cp15.disr_el1 = val;
}
/*
 * Minimal RAS implementation with no Error Records.
 * Which means that all of the Error Record registers:
 *   ERXADDR_EL1
 *   ERXCTLR_EL1
 *   ERXFR_EL1
 *   ERXMISC0_EL1
 *   ERXMISC1_EL1
 *   ERXMISC2_EL1
 *   ERXMISC3_EL1
 *   ERXPFGCDN_EL1  (RASv1p1)
 *   ERXPFGCTL_EL1  (RASv1p1)
 *   ERXPFGF_EL1    (RASv1p1)
 *   ERXSTATUS_EL1
 * and
 *   ERRSELR_EL1
 * may generate UNDEFINED, which is the effect we get by not
 * listing them at all.
 *
 * These registers have fine-grained trap bits, but UNDEF-to-EL1
 * is higher priority than FGT-to-EL2 so we do not need to list them
 * in order to check for an FGT.
 */
static const ARMCPRegInfo minimal_ras_reginfo[] = {
    { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
      .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
    { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL1_R, .accessfn = access_terr,
      .fgt = FGT_ERRIDR_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
      .nv2_redirect_offset = 0x500,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
    { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
      .nv2_redirect_offset = 0x508,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
};
/*
 * Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  This excludes the check for whether the exception
 * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
 * be found by testing 0 < fp_exception_el < sve_exception_el.
 *
 * C.f. the ARM pseudocode function CheckSVEEnabled.  Note that the
 * pseudocode does *not* separate out the FP trap checks, but has them
 * all in one function.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1 && !el_is_in_host(env, el)) {
        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            return 1;
        }
    }

    if (el <= 2 && arm_is_el2_enabled(env)) {
        /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
        if (env->cp15.hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
            case 1:
                if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
        return 3;
    }
#endif
    return 0;
}
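/*
 * Example (illustrative values): at EL0 with CPACR_EL1.ZEN == 0 and
 * el_is_in_host() false, the first check returns 1, i.e. the SVE access
 * traps to EL1. With ZEN == 3 but CPTR_EL2.TZ set (E2H clear) and EL2
 * enabled, the second block returns 2 instead. A return of 0 means no
 * SVE trap is taken at any level.
 */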
/*
 * Return the exception level to which exceptions should be taken for SME.
 * C.f. the ARM pseudocode function CheckSMEAccess.
 */
int sme_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1 && !el_is_in_host(env, el)) {
        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            return 1;
        }
    }

    if (el <= 2 && arm_is_el2_enabled(env)) {
        /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
        if (env->cp15.hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
            case 1:
                if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3.  Since ESM is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
        return 3;
    }
#endif
    return 0;
}
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t *cr = env->vfp.zcr_el;
    uint32_t map = cpu->sve_vq.map;
    uint32_t len = ARM_MAX_VQ - 1;

    if (sm) {
        cr = env->vfp.smcr_el;
        map = cpu->sme_vq.map;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        len = MIN(len, 0xf & (uint32_t)cr[1]);
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        len = MIN(len, 0xf & (uint32_t)cr[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        len = MIN(len, 0xf & (uint32_t)cr[3]);
    }

    map &= MAKE_64BIT_MASK(0, len + 1);
    if (map != 0) {
        return 31 - clz32(map);
    }

    /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
    assert(sm);
    return ctz32(cpu->sme_vq.map);
}
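
/*
 * Worked example for the length clamping above (illustrative values, not
 * taken from the source): with ZCR_EL1.LEN == 5 and ZCR_EL2.LEN == 3 at EL1
 * with EL2 enabled, len becomes MIN(ARM_MAX_VQ - 1, 5, 3) == 3; the map is
 * then masked to vq-1 values 0..3, and a CPU whose sve_vq.map has bits 0, 1
 * and 3 set returns 3, i.e. an effective 512-bit vector length.
 */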
uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
{
    return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
}
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_vqm1_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI. */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_vqm1_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
static const ARMCPRegInfo zcr_reginfo[] = {
    { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
      .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
      .access = PL1_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
      .writefn = zcr_write, .raw_writefn = raw_write },
    { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
      .writefn = zcr_write, .raw_writefn = raw_write },
    { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
      .writefn = zcr_write, .raw_writefn = raw_write },
};
#ifdef TARGET_AARCH64
static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnTP2)) {
            return CP_ACCESS_TRAP;
        }
    }
    /* TODO: FEAT_FGT */
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ENTP2)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */
    if (arm_current_el(env) == 2
        && arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_current_el(env) < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void arm_reset_sve_state(CPUARMState *env)
{
    memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
    /* Recall that FFR is stored as pregs[16]. */
    memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
    vfp_set_fpcr(env, 0x0800009f);
}
void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
{
    uint64_t change = (env->svcr ^ new) & mask;

    if (change == 0) {
        return;
    }
    env->svcr ^= change;

    if (change & R_SVCR_SM_MASK) {
        arm_reset_sve_state(env);
    }

    /*
     * SetPSTATE_ZA zeros on enable and disable.  We can zero this only
     * on enable: while disabled, the storage is inaccessible and the
     * value does not matter.  We're not saving the storage in vmstate
     * when disabled either.
     */
    if (change & new & R_SVCR_ZA_MASK) {
        memset(env->zarray, 0, sizeof(env->zarray));
    }

    if (tcg_enabled()) {
        arm_rebuild_hflags(env);
    }
}
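
/*
 * Illustrative note (not part of the original source): a write that flips
 * SVCR.SM in either direction clears the SVE vector and predicate state via
 * arm_reset_sve_state() above, while env->zarray is zeroed only on the
 * 0 -> 1 transition of SVCR.ZA, because "change & new" filters out the
 * disable case, which is handled purely by making the storage inaccessible.
 */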
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    aarch64_set_svcr(env, value, -1);
}

static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_vqm1_for_el(env, cur_el);
    int new_len;

    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
    value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
    raw_write(env, ri, value);

    /*
     * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
     * when SVL is widened (old values kept, or zeros).  Choose to keep the
     * current values for simplicity.  But for QEMU internals, we must still
     * apply the narrower SVL to the Zregs and Pregs -- see the comment
     * above aarch64_sve_narrow_vq.
     */
    new_len = sve_vqm1_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
static const ARMCPRegInfo sme_reginfo[] = {
    { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
      .access = PL0_RW, .accessfn = access_tpidr2,
      .fgt = FGT_NTPIDR2_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
    { .name = "SVCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, svcr),
      .writefn = svcr_write, .raw_writefn = raw_write },
    { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
      .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
      .access = PL1_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
      .writefn = smcr_write, .raw_writefn = raw_write },
    { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
      .access = PL2_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
      .writefn = smcr_write, .raw_writefn = raw_write },
    { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
      .access = PL3_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
      .writefn = smcr_write, .raw_writefn = raw_write },
    { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
      .access = PL1_R, .accessfn = access_aa64_tid1,
      /*
       * IMPLEMENTOR = 0 (software)
       * REVISION = 0 (implementation defined)
       * SMPS = 0 (no streaming execution priority in QEMU)
       * AFFINITY = 0 (streaming sve mode not shared with other PEs)
       */
      .type = ARM_CP_CONST, .resetvalue = 0, },
    /*
     * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES 0.
     */
    { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_smpri,
      .fgt = FGT_NSMPRI_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
      .nv2_redirect_offset = 0x1f8,
      .access = PL2_RW, .accessfn = access_smprimap,
      .type = ARM_CP_CONST, .resetvalue = 0 },
};
static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush(cs);
}

static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* L0GPTSZ is RO; other bits not mentioned are RES0. */
    uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
        R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
        R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;

    env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
}

static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
                                     env_archcpu(env)->reset_l0gptsz);
}

static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}
static const ARMCPRegInfo rme_reginfo[] = {
    { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
      .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
      .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
    { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
    { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
    { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_paall_write },
    { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_paallos_write },
    /*
     * QEMU does not have a way to invalidate by physical address, thus
     * invalidating a range of physical addresses is accomplished by
     * flushing all tlb entries in the outer shareable domain,
     * just like PAALLOS.
     */
    { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_paallos_write },
    { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_paallos_write },
    { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NOP },
};

static const ARMCPRegInfo rme_mte_reginfo[] = {
    { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NOP },
};
static void aa64_allint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT);
}

static uint64_t aa64_allint_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_ALLINT;
}

static CPAccessResult aa64_allint_access(CPUARMState *env,
                                         const ARMCPRegInfo *ri, bool isread)
{
    if (!isread && arm_current_el(env) == 1 &&
        (arm_hcrx_el2_eff(env) & HCRX_TALLINT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo nmi_reginfo[] = {
    { .name = "ALLINT", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 4, .crm = 3,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = aa64_allint_access,
      .fieldoffset = offsetof(CPUARMState, pstate),
      .writefn = aa64_allint_write, .readfn = aa64_allint_read,
      .resetfn = arm_cp_reset_ignore },
};
#endif /* TARGET_AARCH64 */
static void define_pmu_regs(ARMCPU *cpu)
{
    /*
     * v7 performance monitor control register: same implementor
     * field as main ID register, and we implement four counters in
     * addition to the cycle count register.
     */
    unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
    ARMCPRegInfo pmcr = {
        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
        .access = PL0_RW,
        .fgt = FGT_PMCR_EL0,
        .type = ARM_CP_IO | ARM_CP_ALIAS,
        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
        .accessfn = pmreg_access,
        .readfn = pmcr_read, .raw_readfn = raw_read,
        .writefn = pmcr_write, .raw_writefn = raw_write,
    };
    ARMCPRegInfo pmcr64 = {
        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
        .access = PL0_RW, .accessfn = pmreg_access,
        .fgt = FGT_PMCR_EL0,
        .type = ARM_CP_IO,
        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
        .resetvalue = cpu->isar.reset_pmcr_el0,
        .readfn = pmcr_read, .raw_readfn = raw_read,
        .writefn = pmcr_write, .raw_writefn = raw_write,
    };

    define_one_arm_cp_reg(cpu, &pmcr);
    define_one_arm_cp_reg(cpu, &pmcr64);
    for (i = 0; i < pmcrn; i++) {
        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
        ARMCPRegInfo pmev_regs[] = {
            { .name = pmevcntr_name, .cp = 15, .crn = 14,
              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .fgt = FGT_PMEVCNTRN_EL0,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .accessfn = pmreg_access_xevcntr },
            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
              .fgt = FGT_PMEVCNTRN_EL0,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .raw_readfn = pmevcntr_rawread,
              .raw_writefn = pmevcntr_rawwrite },
            { .name = pmevtyper_name, .cp = 15, .crn = 14,
              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .fgt = FGT_PMEVTYPERN_EL0,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .accessfn = pmreg_access },
            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .fgt = FGT_PMEVTYPERN_EL0,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .raw_writefn = pmevtyper_rawwrite },
        };
        define_arm_cp_regs(cpu, pmev_regs);
        g_free(pmevcntr_name);
        g_free(pmevcntr_el0_name);
        g_free(pmevtyper_name);
        g_free(pmevtyper_el0_name);
    }
    if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (cpu_isar_feature(any_pmuv3p4, cpu)) {
        static const ARMCPRegInfo v84_pmmir = {
            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
            .fgt = FGT_PMMIR_EL1,
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}
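
/*
 * Encoding sketch for the PMEVCNTR/PMEVTYPER loop above (example values
 * only): for counter i == 10, crm = 8 | (3 & (10 >> 3)) == 9 and
 * opc2 = 10 & 7 == 2, matching the architectural PMEVCNTR<n>_EL0 layout
 * in which CRm[1:0] carries n[4:3] and opc2 carries n[2:0].
 */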
#ifndef CONFIG_USER_ONLY
/*
 * We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->isar.id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif
/*
 * Shared logic between LORID and the rest of the LOR* registers.
 * Secure state exclusion has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env,
                                    const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode. */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env, ri, isread);
}

/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
 */
static const ARMCPRegInfo lor_reginfo[] = {
    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LORSA_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LOREA_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LORN_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LORC_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
      .access = PL1_R, .accessfn = access_lor_ns,
      .fgt = FGT_LORID_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
};
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_is_el2_enabled(env) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
};
static const ARMCPRegInfo tlbirange_reginfo[] = {
    { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAE1IS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAAE1IS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVALE1IS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAALE1IS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAE1OS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAAE1OS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVALE1OS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAALE1OS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAE1,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAAE1,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVALE1,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAALE1,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ripas2e1is_write },
    { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ripas2e1is_write },
    { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ripas2e1_write },
    { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ripas2e1_write },
    { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
    { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
};
static const ARMCPRegInfo tlbios_reginfo[] = {
    { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVMALLE1OS,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
      .fgt = FGT_TLBIVAE1OS,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIASIDE1OS,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAAE1OS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVALE1OS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAALE1OS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
};
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000. */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest.  There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCF = 0100 */
        return 0;
    }
    return ret;
}
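
/*
 * Informal restatement (not from the original source): a successful RNDR or
 * RNDRRS read reports NZCV == 0b0000, while the failure path above reports
 * NZCV == 0b0100 (only Z set), the architected "no valid random number
 * available" indication, so a guest retry loop keys off the Z flag alone.
 */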
/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
};
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
#ifdef CONFIG_TCG
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = arm_env_mmu_index(env);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {
#ifndef CONFIG_USER_ONLY
        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_writeback(mr, offset, dline_size);
        }
#endif /*CONFIG_USER_ONLY*/
    }
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
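
/*
 * Worked example for the line-size arithmetic above (example values only):
 * if CTR_EL0.DminLine == 4 then dline_size = 4 << 4 = 64 bytes, and a guest
 * DC CVAP on address 0x10000073 is rounded down to vaddr 0x10000040, so the
 * whole 64-byte line containing the address is written back.
 */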
static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};
static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    CPAccessResult nv1 = access_nv1(env, ri, isread);

    if (nv1 != CP_ACCESS_OK) {
        return nv1;
    }
    return access_mte(env, ri, isread);
}

static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /*
     * TFSR_EL2: similar to generic access_mte(), but we need to
     * account for FEAT_NV. At EL1 this must be a FEAT_NV access;
     * if NV2 is enabled then we will redirect this to TFSR_EL1
     * after doing the HCR and SCR ATA traps; otherwise this will
     * be a trap to EL2 and the HCR/SCR traps do not apply.
     */
    int el = arm_current_el(env);

    if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) {
        return CP_ACCESS_OK;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_TCO;
}

static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
}
static const ARMCPRegInfo mte_reginfo[] = {
    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tfsr_el1,
      .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NV2_REDIRECT,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tfsr_el2,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
};

static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_CONST, .access = PL0_RW, },
};
static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
      .access = PL0_W, .type = ARM_CP_DC_GVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_DC_GZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
};
static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    int el = arm_current_el(env);

    if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
        if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
            if (hcr & HCR_TGE) {
                return CP_ACCESS_TRAP_EL2;
            }
            return CP_ACCESS_TRAP;
        }
    } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_scxtnum_el1(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    CPAccessResult nv1 = access_nv1(env, ri, isread);

    if (nv1 != CP_ACCESS_OK) {
        return nv1;
    }
    return access_scxtnum(env, ri, isread);
}
static const ARMCPRegInfo scxtnum_reginfo[] = {
    { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL0_RW, .accessfn = access_scxtnum,
      .fgt = FGT_SCXTNUM_EL0,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
    { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL1_RW, .accessfn = access_scxtnum_el1,
      .fgt = FGT_SCXTNUM_EL1,
      .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
    { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL2_RW, .accessfn = access_scxtnum,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
    { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
};
static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 2 &&
        arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo fgt_reginfo[] = {
    { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .nv2_redirect_offset = 0x1b8,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
    { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
      .nv2_redirect_offset = 0x1c0,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
    { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
      .nv2_redirect_offset = 0x1d0,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
    { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
      .nv2_redirect_offset = 0x1d8,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
    { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
      .nv2_redirect_offset = 0x1c8,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
};
static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /*
     * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee
     * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
     * about the RESS bits at the top -- we choose the "generate an EL2
     * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let
     * the ptw.c code detect the resulting invalid address).
     */
    env->cp15.vncr_el2 = value & ~0xfffULL;
}
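
/*
 * Illustrative example (values are hypothetical): a guest write of
 * 0x80001abc is stored as 0x80001000, so VNCR_EL2 plus any of the
 * nv2_redirect_offset values used in this file (all multiples of 8,
 * e.g. 0xb0 for VNCR_EL2 itself below) remains 64-bit aligned.
 */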
static const ARMCPRegInfo nv2_reginfo[] = {
    { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .writefn = vncr_write,
      .nv2_redirect_offset = 0xb0,
      .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
};

#endif /* TARGET_AARCH64 */
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
};
static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}

static const ARMCPRegInfo ccsidr2_reginfo[] = {
    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_tid4,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
};

static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_joscr_jmcr(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
{
    /*
     * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
     * in v7A, not in v8A.
     */
    if (!arm_feature(env, ARM_FEATURE_V8) &&
        arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TJDBX)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
};

static const ARMCPRegInfo contextidr_el2 = {
    .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
    .access = PL2_RW,
    .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
};
static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el1nvpct,
      .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el1nvvct,
      .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1,
      .access = PL2_RW, .accessfn = access_el1nvpct,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .access = PL2_RW, .accessfn = access_el1nvvct,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
};
#ifndef CONFIG_USER_ONLY
static const ARMCPRegInfo ats1e1_reginfo[] = {
    { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E1RP,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
    { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E1WP,
      .accessfn = at_s1e01_access, .writefn = ats_write64 },
};

static const ARMCPRegInfo ats1cp_reginfo[] = {
    { .name = "ATS1CPRP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    { .name = "ATS1CPWP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
};
#endif
/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which is never for ARMv7, optionally in ARMv8
 * and mandatorily for ARMv8.2 and up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_tacr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
};
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_pfr0 },
            /*
             * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_pfr1,
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_tid4,
            .fgt = FGT_CLIDR_EL1,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
        define_pmu_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * v8 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         * ID registers which are AArch64 views of the AArch32 ID registers
         * which already existed in v6 and v7 are handled elsewhere,
         * in v6_idregs[].
         */
        int i;
        ARMCPRegInfo v8_idregs[] = {
            /*
             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
             * emulation because we don't know the right value for the
             * GIC field until after we define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr0
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64zfr0 },
            { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64smfr0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar2 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr2 },
            { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr3 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            /*
             * "0, c0, c3, {0,1,2}" are the encodings corresponding to
             * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
             * as RAZ, since it is in the "reserved for future ID
             * registers, RAZ" part of the AArch32 encoding space.
             */
            { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            /*
             * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
             * they're also RAZ for AArch64, and in v8 are gradually
             * being filled with AArch64-view-of-AArch32-ID-register
             * for new ID registers.
             */
            { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_pfr2 },
            { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_dfr1 },
            { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_mmfr5 },
            { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = cpu->pmceid1 },
        };
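        /*
         * For user-only emulation, restrict the ID registers to the
         * fields that are meaningful at EL0, broadly mirroring the set
         * a Linux kernel exposes to user space via its MRS emulation;
         * everything else reads as zero (or as the fixed_bits below).
         */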
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = R_ID_AA64PFR0_FP_MASK |
                               R_ID_AA64PFR0_ADVSIMD_MASK |
                               R_ID_AA64PFR0_SVE_MASK |
                               R_ID_AA64PFR0_DIT_MASK,
              .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
                            (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = R_ID_AA64PFR1_BT_MASK |
                               R_ID_AA64PFR1_SSBS_MASK |
                               R_ID_AA64PFR1_MTE_MASK |
                               R_ID_AA64PFR1_SME_MASK },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1",
              .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
                               R_ID_AA64ZFR0_AES_MASK |
                               R_ID_AA64ZFR0_BITPERM_MASK |
                               R_ID_AA64ZFR0_BFLOAT16_MASK |
                               R_ID_AA64ZFR0_B16B16_MASK |
                               R_ID_AA64ZFR0_SHA3_MASK |
                               R_ID_AA64ZFR0_SM4_MASK |
                               R_ID_AA64ZFR0_I8MM_MASK |
                               R_ID_AA64ZFR0_F32MM_MASK |
                               R_ID_AA64ZFR0_F64MM_MASK },
            { .name = "ID_AA64SMFR0_EL1",
              .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
                               R_ID_AA64SMFR0_BI32I32_MASK |
                               R_ID_AA64SMFR0_B16F32_MASK |
                               R_ID_AA64SMFR0_F16F32_MASK |
                               R_ID_AA64SMFR0_I8I32_MASK |
                               R_ID_AA64SMFR0_F16F16_MASK |
                               R_ID_AA64SMFR0_B16B16_MASK |
                               R_ID_AA64SMFR0_I16I32_MASK |
                               R_ID_AA64SMFR0_F64F64_MASK |
                               R_ID_AA64SMFR0_I16I64_MASK |
                               R_ID_AA64SMFR0_SMEVER_MASK |
                               R_ID_AA64SMFR0_FA64_MASK },
            { .name = "ID_AA64MMFR0_EL1",
              .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
              .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
                            (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
            { .name = "ID_AA64MMFR1_EL1",
              .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
            { .name = "ID_AA64MMFR2_EL1",
              .exported_bits = R_ID_AA64MMFR2_AT_MASK },
            { .name = "ID_AA64MMFR3_EL1",
              .exported_bits = 0 },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = R_ID_AA64ISAR0_AES_MASK |
                               R_ID_AA64ISAR0_SHA1_MASK |
                               R_ID_AA64ISAR0_SHA2_MASK |
                               R_ID_AA64ISAR0_CRC32_MASK |
                               R_ID_AA64ISAR0_ATOMIC_MASK |
                               R_ID_AA64ISAR0_RDM_MASK |
                               R_ID_AA64ISAR0_SHA3_MASK |
                               R_ID_AA64ISAR0_SM3_MASK |
                               R_ID_AA64ISAR0_SM4_MASK |
                               R_ID_AA64ISAR0_DP_MASK |
                               R_ID_AA64ISAR0_FHM_MASK |
                               R_ID_AA64ISAR0_TS_MASK |
                               R_ID_AA64ISAR0_RNDR_MASK },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
                               R_ID_AA64ISAR1_APA_MASK |
                               R_ID_AA64ISAR1_API_MASK |
                               R_ID_AA64ISAR1_JSCVT_MASK |
                               R_ID_AA64ISAR1_FCMA_MASK |
                               R_ID_AA64ISAR1_LRCPC_MASK |
                               R_ID_AA64ISAR1_GPA_MASK |
                               R_ID_AA64ISAR1_GPI_MASK |
                               R_ID_AA64ISAR1_FRINTTS_MASK |
                               R_ID_AA64ISAR1_SB_MASK |
                               R_ID_AA64ISAR1_BF16_MASK |
                               R_ID_AA64ISAR1_DGH_MASK |
                               R_ID_AA64ISAR1_I8MM_MASK },
            { .name = "ID_AA64ISAR2_EL1",
              .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
                               R_ID_AA64ISAR2_RPRES_MASK |
                               R_ID_AA64ISAR2_GPA3_MASK |
                               R_ID_AA64ISAR2_APA3_MASK |
                               R_ID_AA64ISAR2_MOPS_MASK |
                               R_ID_AA64ISAR2_BC_MASK |
                               R_ID_AA64ISAR2_RPRFM_MASK |
                               R_ID_AA64ISAR2_CSSC_MASK },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /*
         * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
         * TODO: For RMR, a write with bit 1 set should do something with
         * cpu_reset(). In the meantime, "the bit is strictly a request",
         * so we are in spec just ignoring writes.
         */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo el1_reset_regs[] = {
                { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                  .access = PL1_R,
                  .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
                { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
                  .access = PL1_RW, .type = ARM_CP_CONST,
                  .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
            };
            define_arm_cp_regs(cpu, el1_reset_regs);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
        if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
            define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
        }

        for (i = 4; i < 16; i++) {
            /*
             * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
             * For pre-v8 cores there are RAZ patterns for these in
             * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
             * v8 extends the "must RAZ" part of the ID register space
             * to also cover c0, 0, c{8-15}, {0-7}.
             * These are STATE_AA32 because in the AArch64 sysreg space
             * c4-c7 is where the AArch64 ID registers live (and we've
             * already defined those in v8_idregs[]), and c8-c15 are not
             * "must RAZ" for AArch64.
             */
            g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
            ARMCPRegInfo v8_aa32_raz_idregs = {
                .name = name,
                .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
                .access = PL1_R, .type = ARM_CP_CONST,
                .accessfn = access_aa64_tid3,
                .resetvalue = 0 };
            define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
        }
    }
    /*
     * Register the base EL2 cpregs.
     * Pre v8, these registers are implemented only as part of the
     * Virtualization Extensions (EL2 present).  Beginning with v8,
     * if EL2 is missing but EL3 is enabled, mostly these become
     * RES0 from EL3, with some specific exceptions.
     */
    if (arm_feature(env, ARM_FEATURE_EL2)
        || (arm_feature(env, ARM_FEATURE_EL3)
            && arm_feature(env, ARM_FEATURE_V8))) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .nv2_redirect_offset = 0x88,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .resetvalue = vmpidr_def,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .nv2_redirect_offset = 0x50,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
        };
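        /*
         * Note that the VPIDR/VMPIDR definitions above reset to the
         * physical MIDR/MPIDR values, so a guest at NS EL1 observes the
         * real ID registers even if no EL2 software ever writes the
         * virtualised copies.
         */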
        /*
         * The only field of MDCR_EL2 that has a defined architectural reset
         * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
         */
        ARMCPRegInfo mdcr_el2 = {
            .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
            .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
            .writefn = mdcr_el2_write,
            .access = PL2_RW, .resetvalue = pmu_num_counters(env),
            .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
        };
        define_one_arm_cp_reg(cpu, &mdcr_el2);
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
        }
        /*
         * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
         * See commentary near RMR_EL1.
         */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            static const ARMCPRegInfo el2_reset_regs[] = {
                { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                  .access = PL2_R,
                  .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
                { .name = "RVBAR", .type = ARM_CP_ALIAS,
                  .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                  .access = PL2_R,
                  .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
                { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
                  .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
            };
            define_arm_cp_regs(cpu, el2_reset_regs);
        }
    }
    /* Register the base EL3 cpregs. */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .access = PL3_R,
              .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
            { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
              .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
            { .name = "RMR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /*
     * The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
        if (cpu_isar_feature(aa32_hpd, cpu)) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
        define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ecv, cpu)) {
        define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo);
    }
#endif
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        ARMCPRegInfo vapa_cp_reginfo[] = {
            { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .resetvalue = 0,
              .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                                     offsetoflow32(CPUARMState, cp15.par_ns) },
              .writefn = par_write },
#ifndef CONFIG_USER_ONLY
            /* This underdecoding is safe because the reginfo is NO_RAW. */
            { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0,
              .opc2 = CP_ANY,
              .access = PL1_W, .accessfn = ats_access,
              .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
        };

        /*
         * When LPAE exists this 32-bit PAR register is an alias of the
         * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
         */
        if (arm_feature(env, ARM_FEATURE_LPAE)) {
            vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB;
        }
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(aa32_jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /*
     * Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /*
             * Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fgt = FGT_MIDR_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .fgt = FGT_REVIDR_EL1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
        };
        ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
            .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB,
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .resetvalue = cpu->midr
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .fgt = FGT_CTR_EL0,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R,
            .accessfn = access_aa32_tid1,
            .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        /* HMPUIR is specific to PMSA V8 */
        ARMCPRegInfo id_hmpuir_reginfo = {
            .name = "HMPUIR",
            .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
            .access = PL2_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav8r_hdregion
        };
        static const ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
9606 static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo
[] = {
9607 { .name
= "MIDR_EL1",
9608 .exported_bits
= R_MIDR_EL1_REVISION_MASK
|
9609 R_MIDR_EL1_PARTNUM_MASK
|
9610 R_MIDR_EL1_ARCHITECTURE_MASK
|
9611 R_MIDR_EL1_VARIANT_MASK
|
9612 R_MIDR_EL1_IMPLEMENTER_MASK
},
9613 { .name
= "REVIDR_EL1" },
9615 modify_arm_cp_regs(id_v8_midr_cp_reginfo
, id_v8_user_midr_cp_reginfo
);
9617 if (arm_feature(env
, ARM_FEATURE_OMAPCP
) ||
9618 arm_feature(env
, ARM_FEATURE_STRONGARM
)) {
9621 * Register the blanket "writes ignored" value first to cover the
9622 * whole space. Then update the specific ID registers to allow write
9623 * access, so that they ignore writes rather than causing them to
9626 define_one_arm_cp_reg(cpu
, &crn0_wi_reginfo
);
9627 for (i
= 0; i
< ARRAY_SIZE(id_pre_v8_midr_cp_reginfo
); ++i
) {
9628 id_pre_v8_midr_cp_reginfo
[i
].access
= PL1_RW
;
9630 for (i
= 0; i
< ARRAY_SIZE(id_cp_reginfo
); ++i
) {
9631 id_cp_reginfo
[i
].access
= PL1_RW
;
9633 id_mpuir_reginfo
.access
= PL1_RW
;
9634 id_tlbtr_reginfo
.access
= PL1_RW
;
9636 if (arm_feature(env
, ARM_FEATURE_V8
)) {
9637 define_arm_cp_regs(cpu
, id_v8_midr_cp_reginfo
);
9638 if (!arm_feature(env
, ARM_FEATURE_PMSA
)) {
9639 define_one_arm_cp_reg(cpu
, &id_v8_midr_alias_cp_reginfo
);
9642 define_arm_cp_regs(cpu
, id_pre_v8_midr_cp_reginfo
);
9644 define_arm_cp_regs(cpu
, id_cp_reginfo
);
9645 if (!arm_feature(env
, ARM_FEATURE_PMSA
)) {
9646 define_one_arm_cp_reg(cpu
, &id_tlbtr_reginfo
);
9647 } else if (arm_feature(env
, ARM_FEATURE_PMSA
) &&
9648 arm_feature(env
, ARM_FEATURE_V8
)) {
9652 define_one_arm_cp_reg(cpu
, &id_mpuir_reginfo
);
9653 define_one_arm_cp_reg(cpu
, &id_hmpuir_reginfo
);
9654 define_arm_cp_regs(cpu
, pmsav8r_cp_reginfo
);
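            /*
             * The per-region PRBAR<n>/PRLAR<n> (and HPRBAR<n>/HPRLAR<n>)
             * aliases below pack the region number into the encoding:
             * index bit 4 goes in the low bit of opc1, bits [3:1] in
             * crm[2:0] with crm[3] set, and bit 0 selects which of the
             * two opc2 values is used (PRLAR additionally ORs in 1),
             * matching the Armv8-R AArch32 system register layout.
             */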
            /* Register alias is only valid for first 32 indexes */
            for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
                uint8_t crm = 0b1000 | extract32(i, 1, 3);
                uint8_t opc1 = extract32(i, 4, 1);
                uint8_t opc2 = extract32(i, 0, 1) << 2;

                tmp_string = g_strdup_printf("PRBAR%u", i);
                ARMCPRegInfo tmp_prbarn_reginfo = {
                    .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL1_RW, .resetvalue = 0,
                    .accessfn = access_tvm_trvm,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
                g_free(tmp_string);

                opc2 = extract32(i, 0, 1) << 2 | 0x1;
                tmp_string = g_strdup_printf("PRLAR%u", i);
                ARMCPRegInfo tmp_prlarn_reginfo = {
                    .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL1_RW, .resetvalue = 0,
                    .accessfn = access_tvm_trvm,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
                g_free(tmp_string);
            }

            /* Register alias is only valid for first 32 indexes */
            for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
                uint8_t crm = 0b1000 | extract32(i, 1, 3);
                uint8_t opc1 = 0b100 | extract32(i, 4, 1);
                uint8_t opc2 = extract32(i, 0, 1) << 2;

                tmp_string = g_strdup_printf("HPRBAR%u", i);
                ARMCPRegInfo tmp_hprbarn_reginfo = {
                    .name = tmp_string,
                    .type = ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL2_RW, .resetvalue = 0,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
                g_free(tmp_string);

                opc2 = extract32(i, 0, 1) << 2 | 0x1;
                tmp_string = g_strdup_printf("HPRLAR%u", i);
                ARMCPRegInfo tmp_hprlarn_reginfo = {
                    .name = tmp_string,
                    .type = ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL2_RW, .resetvalue = 0,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
                g_free(tmp_string);
            }
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .fgt = FGT_MPIDR_EL1,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_tacr,
              .nv2_redirect_offset = 0x118,
              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (cpu_isar_feature(aa32_ac2, cpu)) {
            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         *  (1) older 32-bit only cores have a simple 32-bit CBAR
         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *      32-bit register visible to AArch32 at a different encoding
         *      to the "flavour 1" register and with the bits rearranged to
         *      be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        static const ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .accessfn = access_nv1,
              .fgt = FGT_VBAR_EL1,
              .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW, .accessfn = access_tvm_trvm,
            .fgt = FGT_SCTLR_EL1,
            .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /*
             * Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);

        if (arm_feature(env, ARM_FEATURE_PMSA) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo vsctlr = {
                .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
                .access = PL2_RW, .resetvalue = 0x0,
                .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
            };
            define_one_arm_cp_reg(cpu, &vsctlr);
        }
    }
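    /*
     * Everything from here on is (mostly) keyed off individual
     * ID-register fields via cpu_isar_feature() rather than the legacy
     * ARM_FEATURE_* bits: each optional extension contributes its own
     * reginfo array.
     */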
    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }

    if (cpu_isar_feature(aa64_dit, cpu)) {
        define_one_arm_cp_reg(cpu, &dit_reginfo);
    }
    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        define_one_arm_cp_reg(cpu, &ssbs_reginfo);
    }
    if (cpu_isar_feature(any_ras, cpu)) {
        define_arm_cp_regs(cpu, minimal_ras_reginfo);
    }

    if (cpu_isar_feature(aa64_vh, cpu) ||
        cpu_isar_feature(aa64_debugv8p2, cpu)) {
        define_one_arm_cp_reg(cpu, &contextidr_el2);
    }
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_arm_cp_regs(cpu, zcr_reginfo);
    }

    if (cpu_isar_feature(aa64_hcx, cpu)) {
        define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
    }
#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_sme, cpu)) {
        define_arm_cp_regs(cpu, sme_reginfo);
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbirange, cpu)) {
        define_arm_cp_regs(cpu, tlbirange_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbios, cpu)) {
        define_arm_cp_regs(cpu, tlbios_reginfo);
    }
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }

    /*
     * If full MTE is enabled, add all of the system registers.
     * If only "instructions available at EL0" are enabled,
     * then define only a RAZ/WI version of PSTATE.TCO.
     */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        ARMCPRegInfo gmid_reginfo = {
            .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
            .access = PL1_R, .accessfn = access_aa64_tid5,
            .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
        };
        define_one_arm_cp_reg(cpu, &gmid_reginfo);
        define_arm_cp_regs(cpu, mte_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    }

    if (cpu_isar_feature(aa64_scxtnum, cpu)) {
        define_arm_cp_regs(cpu, scxtnum_reginfo);
    }

    if (cpu_isar_feature(aa64_fgt, cpu)) {
        define_arm_cp_regs(cpu, fgt_reginfo);
    }

    if (cpu_isar_feature(aa64_rme, cpu)) {
        define_arm_cp_regs(cpu, rme_reginfo);
        if (cpu_isar_feature(aa64_mte, cpu)) {
            define_arm_cp_regs(cpu, rme_mte_reginfo);
        }
    }

    if (cpu_isar_feature(aa64_nv2, cpu)) {
        define_arm_cp_regs(cpu, nv2_reginfo);
    }

    if (cpu_isar_feature(aa64_nmi, cpu)) {
        define_arm_cp_regs(cpu, nmi_reginfo);
    }
#endif

    if (cpu_isar_feature(any_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }

    if (cpu_isar_feature(any_ccidx, cpu)) {
        define_arm_cp_regs(cpu, ccsidr2_reginfo);
    }

#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}
/*
 * Private utility function for define_one_arm_cp_reg_with_opaque():
 * add a single reginfo struct to the hash table.
 */
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, CPState state,
                                   CPSecureState secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    CPUARMState *env = &cpu->env;
    uint32_t key;
    ARMCPRegInfo *r2;
    bool is64 = r->type & ARM_CP_64BIT;
    bool ns = secstate & ARM_CP_SECSTATE_NS;
    int cp = r->cp;
    size_t name_len;
    bool make_const;

    switch (state) {
    case ARM_CP_STATE_AA32:
        /* We assume it is a cp15 register if the .cp field is left unset. */
        if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
            cp = 15;
        }
        key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
        break;
    case ARM_CP_STATE_AA64:
        /*
         * To allow abbreviation of ARMCPRegInfo definitions, we treat
         * cp == 0 as equivalent to the value for "standard guest-visible
         * sysreg".  STATE_BOTH definitions are also always "standard sysreg"
         * in their AArch64 view (the .cp value may be non-zero for the
         * benefit of the AArch32 view).
         */
        if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            cp = CP_REG_ARM64_SYSREG_CP;
        }
        key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
        break;
    default:
        g_assert_not_reached();
    }

    /* Overriding of an existing definition must be explicitly requested. */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
        if (oldreg) {
            assert(oldreg->type & ARM_CP_OVERRIDE);
        }
    }

    /*
     * Eliminate registers that are not present because the EL is missing.
     * Doing this here makes it easier to put all registers for a given
     * feature into the same ARMCPRegInfo array and define them all at once.
     */
    make_const = false;
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        /*
         * An EL2 register without EL2 but with EL3 is (usually) RES0.
         * See rule RJFFP in section D1.1.3 of DDI0487H.a.
         */
        int min_el = ctz32(r->access) / 2;
        if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
            if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
                return;
            }
            make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
        }
    } else {
        CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
                                 ? PL2_RW : PL1_RW);
        if ((r->access & max_el) == 0) {
            return;
        }
    }

    /* Combine cpreg and name into one allocation. */
    name_len = strlen(name) + 1;
    r2 = g_malloc(sizeof(*r2) + name_len);
    *r2 = *r;
    r2->name = memcpy(r2 + 1, name, name_len);

    /*
     * Update fields to match the instantiation, overwriting wildcards
     * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
     */
    r2->cp = cp;
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    r2->state = state;
    r2->secure = secstate;
    if (opaque) {
        r2->opaque = opaque;
    }

    if (make_const) {
        /* This should not have been a very special register to begin with. */
        int old_special = r2->type & ARM_CP_SPECIAL_MASK;
        assert(old_special == 0 || old_special == ARM_CP_NOP);
        /*
         * Set the special function to CONST, retaining the other flags.
         * This is important for e.g. ARM_CP_SVE so that we still
         * take the SVE trap if CPTR_EL3.EZ == 0.
         */
        r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
        /*
         * Usually, these registers become RES0, but there are a few
         * special cases like VPIDR_EL2 which have a constant non-zero
         * value with writes ignored.
         */
        if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
            r2->resetvalue = 0;
        }
        /*
         * ARM_CP_CONST has precedence, so removing the callbacks and
         * offsets are not strictly necessary, but it is potentially
         * less confusing to debug later.
         */
        r2->readfn = NULL;
        r2->writefn = NULL;
        r2->raw_readfn = NULL;
        r2->raw_writefn = NULL;
        r2->resetfn = NULL;
        r2->fieldoffset = 0;
        r2->bank_fieldoffsets[0] = 0;
        r2->bank_fieldoffsets[1] = 0;
    } else {
        bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];

        if (isbanked) {
            /*
             * Register is banked (using both entries in array).
             * Overwriting fieldoffset as the array is only used to define
             * banked registers but later only fieldoffset is used.
             */
            r2->fieldoffset = r->bank_fieldoffsets[ns];
        }
        if (state == ARM_CP_STATE_AA32) {
            if (isbanked) {
                /*
                 * If the register is banked then we don't need to migrate or
                 * reset the 32-bit instance in certain cases:
                 *
                 * 1) If the register has both 32-bit and 64-bit instances
                 *    then we can count on the 64-bit instance taking care
                 *    of the non-secure bank.
                 * 2) If ARMv8 is enabled then we can count on a 64-bit
                 *    version taking care of the secure bank.  This requires
                 *    that separate 32 and 64-bit definitions are provided.
                 */
                if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                    (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
                    r2->type |= ARM_CP_ALIAS;
                }
            } else if ((secstate != r->secure) && !ns) {
                /*
                 * The register is not banked so we only want to allow
                 * migration of the non-secure instance.
                 */
                r2->type |= ARM_CP_ALIAS;
            }

            if (HOST_BIG_ENDIAN &&
                r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
        }
    }

    /*
     * By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times.  Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if (r2->type & ARM_CP_SPECIAL_MASK) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /*
     * Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
}
*cpu
,
10191 const ARMCPRegInfo
*r
, void *opaque
)
10194 * Define implementations of coprocessor registers.
10195 * We store these in a hashtable because typically
10196 * there are less than 150 registers in a space which
10197 * is 16*16*16*8*8 = 262144 in size.
10198 * Wildcarding is supported for the crm, opc1 and opc2 fields.
10199 * If a register is defined twice then the second definition is
10200 * used, so this can be used to define some generic registers and
10201 * then override them with implementation specific variations.
10202 * At least one of the original and the second definition should
10203 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
10204 * against accidental use.
10206 * The state field defines whether the register is to be
10207 * visible in the AArch32 or AArch64 execution state. If the
10208 * state is set to ARM_CP_STATE_BOTH then we synthesise a
10209 * reginfo structure for the AArch32 view, which sees the lower
10210 * 32 bits of the 64 bit register.
10212 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
10213 * be wildcarded. AArch64 registers are always considered to be 64
10214 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
10215 * the register, if any.
10217 int crm
, opc1
, opc2
;
10218 int crmmin
= (r
->crm
== CP_ANY
) ? 0 : r
->crm
;
10219 int crmmax
= (r
->crm
== CP_ANY
) ? 15 : r
->crm
;
10220 int opc1min
= (r
->opc1
== CP_ANY
) ? 0 : r
->opc1
;
10221 int opc1max
= (r
->opc1
== CP_ANY
) ? 7 : r
->opc1
;
10222 int opc2min
= (r
->opc2
== CP_ANY
) ? 0 : r
->opc2
;
10223 int opc2max
= (r
->opc2
== CP_ANY
) ? 7 : r
->opc2
;
10226 /* 64 bit registers have only CRm and Opc1 fields */
10227 assert(!((r
->type
& ARM_CP_64BIT
) && (r
->opc2
|| r
->crn
)));
10228 /* op0 only exists in the AArch64 encodings */
10229 assert((r
->state
!= ARM_CP_STATE_AA32
) || (r
->opc0
== 0));
10230 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
10231 assert((r
->state
!= ARM_CP_STATE_AA64
) || !(r
->type
& ARM_CP_64BIT
));
10233 * This API is only for Arm's system coprocessors (14 and 15) or
10234 * (M-profile or v7A-and-earlier only) for implementation defined
10235 * coprocessors in the range 0..7. Our decode assumes this, since
10236 * 8..13 can be used for other insns including VFP and Neon. See
10237 * valid_cp() in translate.c. Assert here that we haven't tried
10238 * to use an invalid coprocessor number.
10240 switch (r
->state
) {
10241 case ARM_CP_STATE_BOTH
:
10242 /* 0 has a special meaning, but otherwise the same rules as AA32. */
10247 case ARM_CP_STATE_AA32
:
10248 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
) &&
10249 !arm_feature(&cpu
->env
, ARM_FEATURE_M
)) {
10250 assert(r
->cp
>= 14 && r
->cp
<= 15);
10252 assert(r
->cp
< 8 || (r
->cp
>= 14 && r
->cp
<= 15));
10255 case ARM_CP_STATE_AA64
:
10256 assert(r
->cp
== 0 || r
->cp
== CP_REG_ARM64_SYSREG_CP
);
10259 g_assert_not_reached();
10262 * The AArch64 pseudocode CheckSystemAccess() specifies that op1
10263 * encodes a minimum access level for the register. We roll this
10264 * runtime check into our general permission check code, so check
10265 * here that the reginfo's specified permissions are strict enough
10266 * to encompass the generic architectural permission check.
10268 if (r
->state
!= ARM_CP_STATE_AA32
) {
10269 CPAccessRights mask
;
10272 /* min_EL EL1, but some accessible to EL0 via kernel ABI */
10273 mask
= PL0U_R
| PL1_RW
;
10293 /* min_EL EL1, secure mode only (we don't check the latter) */
10297 /* broken reginfo with out-of-range opc1 */
10298 g_assert_not_reached();
10300 /* assert our permissions are not too lax (stricter is fine) */
10301 assert((r
->access
& ~mask
) == 0);
10305 * Check that the register definition has enough info to handle
10306 * reads and writes if they are permitted.
10308 if (!(r
->type
& (ARM_CP_SPECIAL_MASK
| ARM_CP_CONST
))) {
10309 if (r
->access
& PL3_R
) {
10310 assert((r
->fieldoffset
||
10311 (r
->bank_fieldoffsets
[0] && r
->bank_fieldoffsets
[1])) ||
10314 if (r
->access
& PL3_W
) {
10315 assert((r
->fieldoffset
||
10316 (r
->bank_fieldoffsets
[0] && r
->bank_fieldoffsets
[1])) ||
10321 for (crm
= crmmin
; crm
<= crmmax
; crm
++) {
10322 for (opc1
= opc1min
; opc1
<= opc1max
; opc1
++) {
10323 for (opc2
= opc2min
; opc2
<= opc2max
; opc2
++) {
10324 for (state
= ARM_CP_STATE_AA32
;
10325 state
<= ARM_CP_STATE_AA64
; state
++) {
10326 if (r
->state
!= state
&& r
->state
!= ARM_CP_STATE_BOTH
) {
10329 if (state
== ARM_CP_STATE_AA32
) {
10331 * Under AArch32 CP registers can be common
10332 * (same for secure and non-secure world) or banked.
10336 switch (r
->secure
) {
10337 case ARM_CP_SECSTATE_S
:
10338 case ARM_CP_SECSTATE_NS
:
10339 add_cpreg_to_hashtable(cpu
, r
, opaque
, state
,
10340 r
->secure
, crm
, opc1
, opc2
,
10343 case ARM_CP_SECSTATE_BOTH
:
10344 name
= g_strdup_printf("%s_S", r
->name
);
10345 add_cpreg_to_hashtable(cpu
, r
, opaque
, state
,
10347 crm
, opc1
, opc2
, name
);
10349 add_cpreg_to_hashtable(cpu
, r
, opaque
, state
,
10350 ARM_CP_SECSTATE_NS
,
10351 crm
, opc1
, opc2
, r
->name
);
10354 g_assert_not_reached();
10358 * AArch64 registers get mapped to non-secure instance
10361 add_cpreg_to_hashtable(cpu
, r
, opaque
, state
,
10362 ARM_CP_SECSTATE_NS
,
10363 crm
, opc1
, opc2
, r
->name
);
/* Define a whole list of registers */
void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
                                        void *opaque, size_t len)
{
    size_t i;

    for (i = 0; i < len; ++i) {
        define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
    }
}
/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
                                 const ARMCPRegUserSpaceInfo *mods,
                                 size_t mods_len)
{
    for (size_t mi = 0; mi < mods_len; ++mi) {
        const ARMCPRegUserSpaceInfo *m = mods + mi;
        GPatternSpec *pat = NULL;

        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (size_t ri = 0; ri < regs_len; ++ri) {
            ARMCPRegInfo *r = regs + ri;

            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
}
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /*
     * Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /*
         * Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /*
         * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
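
/*
 * Reassemble the guest-visible CPSR from the pieces QEMU caches
 * separately (NF/ZF/CF/VF, QF, Thumb, IT bits, GE and DAIF).
 */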
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;
    bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
        (mask & (CPSR_M | CPSR_E | CPSR_IL));

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /*
     * In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /*
             * Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /*
             * Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /*
             * Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /*
             * Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /*
             * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
    if (tcg_enabled() && rebuild_hflags) {
        arm_rebuild_hflags(env);
    }
}
#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
/*
 * Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *       routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *       routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            BIT      IRQ      IMO     Non-secure         Secure
 *            EL3      FIQ  RW  FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
};
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cpu_env(cs);
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /*
         * Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
    case EXCP_NMI:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2.  Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
void arm_log_exception(CPUState *cs)
{
    int idx = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
            [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
            [EXCP_VSERR] = "Virtual SERR",
            [EXCP_GPC] = "Granule Protection Check",
            [EXCP_NMI] = "NMI",
            [EXCP_VINMI] = "Virtual IRQ NMI",
            [EXCP_VFNMI] = "Virtual FIQ NMI",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
                      idx, exc, cs->cpu_index);
    }
}
/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * registers.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->pstate &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;

    /* This must be after mode switching. */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
            env->uncached_cpsr |= CPSR_SSBS;
        } else {
            env->uncached_cpsr &= ~CPSR_SSBS;
        }
    }

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless... */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state.  */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;

    if (tcg_enabled()) {
        arm_rebuild_hflags(env);
    }
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x08;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        /* Debug exceptions are reported differently on AArch32 */
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_BREAKPOINT:
        case EC_BREAKPOINT_SAME_EL:
        case EC_AA32_BKPT:
        case EC_VECTORCATCH:
            env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
                                                     0, 0, 0x22);
            break;
        case EC_WATCHPOINT:
            env->exception.syndrome = syn_set_ec(env->exception.syndrome,
                                                 EC_DATAABORT);
            break;
        case EC_WATCHPOINT_SAME_EL:
            env->exception.syndrome = syn_set_ec(env->exception.syndrome,
                                                 EC_DATAABORT_SAME_EL);
            break;
        }
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_VSERR:
        {
            /*
             * Note that this is reported as a data abort, but the DFAR
             * has an UNKNOWN value. Construct the SError syndrome from
             * AET and ExT fields.
             */
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = arm_fi_to_lfsc(&fi);
            } else {
                env->exception.fsr = arm_fi_to_sfsc(&fi);
            }
            env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
            A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
            qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x\n",
                          env->exception.fsr);

            new_mode = ARM_CPU_MODE_ABT;
            addr = 0x10;
            mask = CPSR_A | CPSR_I;
            offset = 8;
        }
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /*
         * ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
{
    /*
     * Return the register number of the AArch64 view of the AArch32
     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
     * be that of the AArch32 mode the exception came from.
     */
    int mode = env->uncached_cpsr & CPSR_M;

    switch (aarch32_reg) {
    case 0 ... 7:
        return aarch32_reg;
    case 8 ... 12:
        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
    case 13:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
            return 13;
        case ARM_CPU_MODE_HYP:
            return 15;
        case ARM_CPU_MODE_IRQ:
            return 17;
        case ARM_CPU_MODE_SVC:
            return 19;
        case ARM_CPU_MODE_ABT:
            return 21;
        case ARM_CPU_MODE_UND:
            return 23;
        case ARM_CPU_MODE_FIQ:
            return 29;
        default:
            g_assert_not_reached();
        }
    case 14:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
        case ARM_CPU_MODE_HYP:
            return 14;
        case ARM_CPU_MODE_IRQ:
            return 16;
        case ARM_CPU_MODE_SVC:
            return 18;
        case ARM_CPU_MODE_ABT:
            return 20;
        case ARM_CPU_MODE_UND:
            return 22;
        case ARM_CPU_MODE_FIQ:
            return 30;
        default:
            g_assert_not_reached();
        }
    case 15:
        return 31;
    default:
        g_assert_not_reached();
    }
}
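
/*
 * Build the SPSR_ELx view of the current AArch32 CPSR: DIT is moved to
 * its SPSR_ELx position and PSTATE.SS is merged in.
 */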
static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
    uint32_t ret = cpsr_read(env);

    /* Move DIT to the correct location for SPSR_ELx */
    if (ret & CPSR_DIT) {
        ret &= ~CPSR_DIT;
        ret |= PSTATE_DIT;
    }
    /* Merge PSTATE.SS into SPSR_ELx */
    ret |= env->pstate & PSTATE_SS;

    return ret;
}
static bool syndrome_is_sync_extabt(uint32_t syndrome)
{
    /* Return true if this syndrome value is a synchronous external abort */
    switch (syn_get_ec(syndrome)) {
    case EC_INSNABORT:
    case EC_INSNABORT_SAME_EL:
    case EC_DATAABORT:
    case EC_DATAABORT_SAME_EL:
        /* Look at fault status code for all the synchronous ext abort cases */
        switch (syndrome & 0x3f) {
        case 0x10:
        case 0x13:
        case 0x14:
        case 0x15:
        case 0x16:
        case 0x17:
            return true;
        default:
            return false;
        }
    default:
        return false;
    }
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);
    int rt;

    if (tcg_enabled()) {
        /*
         * Note that new_el can never be 0.  If cur_el is 0, then
         * el0_a64 is is_a64(), else el0_a64 is ignored.
         */
        aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
    }

    if (cur_el < new_el) {
        /*
         * Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_GPC:
        qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
                      env->cp15.mfar_el3);
        /* fall through */
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * FEAT_DoubleFault allows synchronous external aborts taken to EL3
         * to be taken to the SError vector entrypoint.
         */
        if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
            syndrome_is_sync_extabt(env->exception.syndrome)) {
            addr += 0x180;
        }
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_ADVSIMDFPACCESSTRAP:
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
            break;
        case EC_CP14RTTRAP:
        case EC_CP15RTTRAP:
        case EC_CP14DTTRAP:
            /*
             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
             * the raw register field from the insn; when taking this to
             * AArch64 we must convert it to the AArch64 view of the register
             * number. Notice that we read a 4-bit AArch32 register number and
             * write back a 5-bit AArch64 one.
             */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            break;
        case EC_CP15RRTTRAP:
        case EC_CP14RRTTRAP:
            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            rt = extract32(env->exception.syndrome, 10, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                10, 5, rt);
            break;
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
    case EXCP_NMI:
    case EXCP_VINMI:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
    case EXCP_VFNMI:
        addr += 0x100;
        break;
    case EXCP_VSERR:
        addr += 0x180;
        /* Construct the SError syndrome from IDS and ISS fields. */
        env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;

        if (cur_el == 1 && new_el == 1) {
            uint64_t hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV ||
                (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) {
                /*
                 * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR
                 * by setting M[3:2] to 0b10.
                 * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
                 * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
                 */
                old_mode = deposit32(old_mode, 2, 2, 2);
            }
        }
    } else {
        old_mode = cpsr_read_for_spsr_elx(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        new_mode |= PSTATE_TCO;
    }

    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
            new_mode |= PSTATE_SSBS;
        } else {
            new_mode &= ~PSTATE_SSBS;
        }
    }

    if (cpu_isar_feature(aa64_nmi, cpu)) {
        if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
            new_mode |= PSTATE_ALLINT;
        } else {
            new_mode &= ~PSTATE_ALLINT;
        }
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = true;
    aarch64_restore_sp(env, new_el);

    if (tcg_enabled()) {
        helper_rebuild_hflags_a64(env, new_el);
    }

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void tcg_handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        do_common_semihosting(cs);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        do_common_semihosting(cs);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif
/*
 * Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 *
 * Note: this is used for both TCG (as the do_interrupt tcg op),
 *       and KVM to re-inject guest debug exceptions, and to
 *       inject a Synchronous-External-Abort.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        tcg_handle_semihosting(cs);
        return;
    }
#endif

    /*
     * Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(bql_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}

#endif /* !CONFIG_USER_ONLY */
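
/*
 * Return the SCTLR value controlling the given exception level: EL0 is
 * folded into the EL1&0 or EL2&0 regime that owns it, and the AArch32
 * Secure PL1&0 regime is always controlled by SCTLR_S.
 */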
uint64_t arm_sctlr(CPUARMState *env, int el)
{
    if (arm_aa32_secure_pl1_0(env)) {
        /* In Secure PL1&0 SCTLR_S is always controlling */
        el = 3;
    } else if (el == 0) {
        /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
    }
    return env->cp15.sctlr_el[el];
}
int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (regime_is_stage2(mmu_idx)) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits. */
        return extract32(tcr, 20, 1) * 3;
    }
}

int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (regime_is_stage2(mmu_idx)) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits. */
        return extract32(tcr, 29, 1) * 3;
    }
}

int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits. */
        return extract32(tcr, 30, 1) * 3;
    }
}
static ARMGranuleSize tg0_to_gran_size(int tg)
{
    switch (tg) {
    case 0:
        return Gran4K;
    case 1:
        return Gran64K;
    case 2:
        return Gran16K;
    default:
        return GranInvalid;
    }
}

static ARMGranuleSize tg1_to_gran_size(int tg)
{
    switch (tg) {
    case 1:
        return Gran16K;
    case 2:
        return Gran4K;
    case 3:
        return Gran64K;
    default:
        return GranInvalid;
    }
}

static inline bool have4k(ARMCPU *cpu, bool stage2)
{
    return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
        : cpu_isar_feature(aa64_tgran4, cpu);
}

static inline bool have16k(ARMCPU *cpu, bool stage2)
{
    return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
        : cpu_isar_feature(aa64_tgran16, cpu);
}

static inline bool have64k(ARMCPU *cpu, bool stage2)
{
    return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
        : cpu_isar_feature(aa64_tgran64, cpu);
}

static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
                                         bool stage2)
{
    switch (gran) {
    case Gran4K:
        if (have4k(cpu, stage2)) {
            return gran;
        }
        break;
    case Gran16K:
        if (have16k(cpu, stage2)) {
            return gran;
        }
        break;
    case Gran64K:
        if (have64k(cpu, stage2)) {
            return gran;
        }
        break;
    case GranInvalid:
        break;
    }
    /*
     * If the guest selects a granule size that isn't implemented,
     * the architecture requires that we behave as if it selected one
     * that is (with an IMPDEF choice of which one to pick). We choose
     * to implement the smallest supported granule size.
     */
    if (have4k(cpu, stage2)) {
        return Gran4K;
    }
    if (have16k(cpu, stage2)) {
        return Gran16K;
    }
    assert(have64k(cpu, stage2));
    return Gran64K;
}
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    bool epd, hpd, tsz_oob, ds, ha, hd;
    int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
    ARMGranuleSize gran;
    ARMCPU *cpu = env_archcpu(env);
    bool stage2 = regime_is_stage2(mmu_idx);

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        gran = tg0_to_gran_size(extract32(tcr, 14, 2));
        if (stage2) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
        sh = extract32(tcr, 12, 2);
        ps = extract32(tcr, 16, 3);
        ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
        hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
        ds = extract64(tcr, 32, 1);
    } else {
        bool e0pd;

        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            gran = tg0_to_gran_size(extract32(tcr, 14, 2));
            epd = extract32(tcr, 7, 1);
            sh = extract32(tcr, 12, 2);
            hpd = extract64(tcr, 41, 1);
            e0pd = extract64(tcr, 55, 1);
        } else {
            tsz = extract32(tcr, 16, 6);
            gran = tg1_to_gran_size(extract32(tcr, 30, 2));
            epd = extract32(tcr, 23, 1);
            sh = extract32(tcr, 28, 2);
            hpd = extract64(tcr, 42, 1);
            e0pd = extract64(tcr, 56, 1);
        }
        ps = extract64(tcr, 32, 3);
        ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
        hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
        ds = extract64(tcr, 59, 1);

        if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
            regime_is_user(env, mmu_idx)) {
            epd = true;
        }
    }

    gran = sanitize_gran_size(cpu, gran, stage2);

    if (cpu_isar_feature(aa64_st, cpu)) {
        max_tsz = 48 - (gran == Gran64K);
    } else {
        max_tsz = 39;
    }

    /*
     * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
     * adjust the effective value of DS, as documented.
     */
    min_tsz = 16;
    if (gran == Gran64K) {
        if (cpu_isar_feature(aa64_lva, cpu)) {
            min_tsz = 12;
        }
        ds = false;
    } else if (ds) {
        if (regime_is_stage2(mmu_idx)) {
            if (gran == Gran16K) {
                ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
            }
        } else {
            if (gran == Gran16K) {
                ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
            }
        }
        if (ds) {
            min_tsz = 12;
        }
    }

    if (stage2 && el1_is_aa32) {
        /*
         * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
         * are loosened: a configured IPA of 40 bits is permitted even if
         * the implemented PA is less than that (and so a 40 bit IPA would
         * fault for an AArch64 EL1). See R_DTLMN.
         */
        min_tsz = MIN(min_tsz, 24);
    }

    if (tsz > max_tsz) {
        tsz = max_tsz;
        tsz_oob = true;
    } else if (tsz < min_tsz) {
        tsz = min_tsz;
        tsz_oob = true;
    } else {
        tsz_oob = false;
    }

    /* Present TBI as a composite with TBID. */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;

    return (ARMVAParameters) {
        .tsz = tsz,
        .ps = ps,
        .sh = sh,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .tsz_oob = tsz_oob,
        .ds = ds,
        .ha = ha,
        .hd = hd,
        .gran = gran,
    };
}
/*
 * Note that signed overflow is undefined in C.  The following routines are
 * careful to use unsigned types where modulo arithmetic is required.
 * Failure to do so _will_ break on newer gcc.
 */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
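
/* Absolute difference of two unsigned bytes, used by the usad8 helper below. */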
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}

/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
/*
 * Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2;

    /*
     * CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    /*
     * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);

        switch (fpen) {
        case 1:
            if (cur_el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            /* Trap from Secure PL0 or PL1 to Secure PL1. */
            if (!arm_el_is_aa64(env, 3)
                && (cur_el == 3 || arm_is_secure_below_el3(env))) {
                return 3;
            }
            if (cur_el <= 1) {
                return 1;
            }
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /*
     * CPTR_EL2 is present in v7VE or v8, and changes format
     * with HCR_EL2.E2H (regardless of TGE).
     */
    if (cur_el <= 2) {
        if (hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
            case 1:
                if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else if (arm_is_el2_enabled(env)) {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3 : present in v8 */
    if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
/*
 * Return the exception level we're running at if this is our mmu_idx.
 * s_pl1_0 should be true if this is the AArch32 Secure PL1&0 translation
 * regime.
 */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return s_pl1_0 ? 3 : 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    ARMMMUIdx idx;
    uint64_t hcr;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            idx = ARMMMUIdx_E20_0;
        } else {
            idx = ARMMMUIdx_E10_0;
        }
        break;
    case 3:
        /*
         * AArch64 EL3 has its own translation regime; AArch32 EL3
         * uses the Secure PL1&0 translation regime.
         */
        if (arm_el_is_aa64(env, 3)) {
            return ARMMMUIdx_E3;
        }
        /* fall through */
    case 1:
        if (arm_pan_enabled(env)) {
            idx = ARMMMUIdx_E10_1_PAN;
        } else {
            idx = ARMMMUIdx_E10_1;
        }
        break;
    case 2:
        /* Note that TGE does not apply at EL2.  */
        if (arm_hcr_el2_eff(env) & HCR_E2H) {
            if (arm_pan_enabled(env)) {
                idx = ARMMMUIdx_E20_2_PAN;
            } else {
                idx = ARMMMUIdx_E20_2;
            }
        } else {
            idx = ARMMMUIdx_E2;
        }
        break;
    default:
        g_assert_not_reached();
    }

    return idx;
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}

static bool mve_no_pred(CPUARMState *env)
{
    /*
     * Return true if there is definitely no predication of MVE
     * instructions by VPR or LTPSIZE. (Returning false even if there
     * isn't any predication is OK; generated code will just be
     * a little worse.)
     * If the CPU does not implement MVE then this TB flag is always 0.
     *
     * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
     * logic in gen_update_fp_context() needs to be updated to match.
     *
     * We do not include the effect of the ECI bits here -- they are
     * tracked in other TB flags. This simplifies the logic for
     * "when did we emit code that changes the MVE_NO_PRED TB flag
     * and thus need to end the TB?".
     */
    if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
        return false;
    }
    if (env->v7m.vpr) {
        return false;
    }
    if (env->v7m.ltpsize < 4) {
        return false;
    }
    return true;
}
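
/*
 * Fill in *pc, *cs_base and *pflags for the translation-block lookup key:
 * the cached hflags plus the flag bits that can change on every TB.
 */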
void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags)
{
    CPUARMTBFlags flags;

    assert_hflags_rebuild_correctly(env);
    flags = env->hflags;

    if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            DP_TBFLAG_A64(flags, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                DP_TBFLAG_M32(flags, LSPACT, 1);
            }

            if (mve_no_pred(env)) {
                DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
                DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                DP_TBFLAG_A32(flags, VFPEN, 1);
            }
        }

        DP_TBFLAG_AM32(flags, THUMB, env->thumb);
        DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0          x         Inactive (the TB flag for SS is always 0)
     *     1          0         Active-pending
     *     1          1         Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
     */
    if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
        DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
    }

    *pflags = flags.flags;
    *cs_base = flags.flags2;
}

#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers. The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs. */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr. */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
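    /*
     * Each predicate register has one bit per vector byte, i.e. 16 bits
     * per quadword of vector (per unit of VQ), and p[] stores them as
     * uint64_t, so one element covers 4 VQ units. pmask above keeps only
     * the low 16 * (vq & 3) bits of the element that straddles the new
     * VQ; the loop below applies it there and then zeroes all higher
     * elements.
     */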
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
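
/*
 * Return the effective vector length for @el (in quadwords, minus one),
 * or 0 (the minimum) if SVE/SME access is trapped or disabled at that EL,
 * matching the architectural rule that ZCR_ELx.LEN then has an effective
 * value of 0.
 */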
static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
{
    int exc_el;

    if (sm) {
        exc_el = sme_exception_el(env, el);
    } else {
        exc_el = sve_exception_el(env, el);
    }
    if (exc_el) {
        return 0; /* disabled */
    }
    return sve_vqm1_for_el_sm(env, el, sm);
}

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64, sm;

    /* Nothing to do if no SVE. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL. */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;

    /*
     * Both AArch64.TakeException and AArch64.ExceptionReturn
     * invoke ResetSVEState when taking an exception from, or
     * returning to, AArch32 state when PSTATE.SM is enabled.
     */
    sm = FIELD_EX64(env->svcr, SVCR, SM);
    if (old_a64 != new_a64 && sm) {
        arm_reset_sve_state(env);
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_len = new_len = 0;
    if (old_a64) {
        old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
    }
    if (new_a64) {
        new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
    }

    /* When changing vector length, clear inaccessible state. */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif /* TARGET_AARCH64 */

#ifndef CONFIG_USER_ONLY
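/*
 * Return the security space (Root, Secure, Non-secure or Realm) that the
 * CPU is currently executing in.
 */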
ARMSecuritySpace arm_security_space(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_secure_to_space(env->v7m.secure);
    }

    /*
     * If EL3 is not supported then the secure state is implementation
     * defined, in which case QEMU defaults to non-secure.
     */
    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        return ARMSS_NonSecure;
    }

    /* Check for AArch64 EL3 or AArch32 Mon. */
    if (is_a64(env)) {
        /* PSTATE.EL is bits [3:2]; the value 3 means we are at EL3. */
        if (extract32(env->pstate, 2, 2) == 3) {
            if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
                return ARMSS_Root;
            } else {
                return ARMSS_Secure;
            }
        }
    } else {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            return ARMSS_Secure;
        }
    }

    return arm_security_space_below_el3(env);
}
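
/*
 * Return the security space for exception levels below EL3, based on
 * SCR_EL3.{NS,NSE} (or Non-secure if EL3 is not implemented).
 */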
ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
{
    assert(!arm_feature(env, ARM_FEATURE_M));

    /*
     * If EL3 is not supported then the secure state is implementation
     * defined, in which case QEMU defaults to non-secure.
     */
    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        return ARMSS_NonSecure;
    }

    /*
     * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
     * Ignoring NSE when !NS retains consistency without having to
     * modify other predicates.
     */
    if (!(env->cp15.scr_el3 & SCR_NS)) {
        return ARMSS_Secure;
    } else if (env->cp15.scr_el3 & SCR_NSE) {
        return ARMSS_Realm;
    } else {
        return ARMSS_NonSecure;
    }
}
#endif /* !CONFIG_USER_ONLY */