1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * vpmu_counter_access - Test vPMU event counter access
 *
 * Copyright (c) 2023 Google LLC.
 *
 * This test checks if the guest can see the same number of the PMU event
 * counters (PMCR_EL0.N) that userspace sets, if the guest can access
 * those counters, and if the guest is prevented from accessing any
 * other counters.
 * It also checks if the userspace accesses to the PMU registers honor the
 * PMCR.N value that's set for the guest.
 * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
 */
#include <kvm_util.h>
#include <processor.h>
#include <test_util.h>
#include <vgic.h>
#include <vpmu.h>

#include <perf/arm_pmuv3.h>
#include <linux/bitfield.h>
22 /* The max number of the PMU event counters (excluding the cycle counter) */
23 #define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
25 /* The cycle counter bit position that's common among the PMU registers */
26 #define ARMV8_PMU_CYCLE_IDX 31
/* Bundles the VM, its single vCPU, and the vGIC fd the test operates on. */
struct vpmu_vm {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	int gic_fd;
};

/* The (single) VM under test; re-created for each sub-test run. */
static struct vpmu_vm vpmu_vm;
/* A pair of 'set' and 'clr' variants of a PMU bitmap register. */
struct pmreg_sets {
	uint64_t set_reg_id;
	uint64_t clr_reg_id;
};

#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
43 static uint64_t get_pmcr_n(uint64_t pmcr
)
45 return FIELD_GET(ARMV8_PMU_PMCR_N
, pmcr
);
48 static void set_pmcr_n(uint64_t *pmcr
, uint64_t pmcr_n
)
50 u64p_replace_bits((__u64
*) pmcr
, pmcr_n
, ARMV8_PMU_PMCR_N
);
53 static uint64_t get_counters_mask(uint64_t n
)
55 uint64_t mask
= BIT(ARMV8_PMU_CYCLE_IDX
);
58 mask
|= GENMASK(n
- 1, 0);
62 /* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
63 static inline unsigned long read_sel_evcntr(int sel
)
65 write_sysreg(sel
, pmselr_el0
);
67 return read_sysreg(pmxevcntr_el0
);
70 /* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
71 static inline void write_sel_evcntr(int sel
, unsigned long val
)
73 write_sysreg(sel
, pmselr_el0
);
75 write_sysreg(val
, pmxevcntr_el0
);
79 /* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
80 static inline unsigned long read_sel_evtyper(int sel
)
82 write_sysreg(sel
, pmselr_el0
);
84 return read_sysreg(pmxevtyper_el0
);
87 /* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
88 static inline void write_sel_evtyper(int sel
, unsigned long val
)
90 write_sysreg(sel
, pmselr_el0
);
92 write_sysreg(val
, pmxevtyper_el0
);
96 static void pmu_disable_reset(void)
98 uint64_t pmcr
= read_sysreg(pmcr_el0
);
100 /* Reset all counters, disabling them */
101 pmcr
&= ~ARMV8_PMU_PMCR_E
;
102 write_sysreg(pmcr
| ARMV8_PMU_PMCR_P
, pmcr_el0
);
106 #define RETURN_READ_PMEVCNTRN(n) \
107 return read_sysreg(pmevcntr##n##_el0)
108 static unsigned long read_pmevcntrn(int n
)
110 PMEVN_SWITCH(n
, RETURN_READ_PMEVCNTRN
);
114 #define WRITE_PMEVCNTRN(n) \
115 write_sysreg(val, pmevcntr##n##_el0)
116 static void write_pmevcntrn(int n
, unsigned long val
)
118 PMEVN_SWITCH(n
, WRITE_PMEVCNTRN
);
122 #define READ_PMEVTYPERN(n) \
123 return read_sysreg(pmevtyper##n##_el0)
124 static unsigned long read_pmevtypern(int n
)
126 PMEVN_SWITCH(n
, READ_PMEVTYPERN
);
130 #define WRITE_PMEVTYPERN(n) \
131 write_sysreg(val, pmevtyper##n##_el0)
132 static void write_pmevtypern(int n
, unsigned long val
)
134 PMEVN_SWITCH(n
, WRITE_PMEVTYPERN
);
/*
 * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
 * accessors that test cases will use. Each of the accessors will
 * either directly reads/writes PMEV{CNTR,TYPER}<n>_EL0
 * (i.e. {read,write}_pmev{cnt,type}rn()), or reads/writes them through
 * PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()).
 *
 * This is used to test that combinations of those accessors provide
 * the consistent behavior.
 */
struct pmc_accessor {
	/* A function to be used to read PMEVCNTR<n>_EL0 */
	unsigned long	(*read_cntr)(int idx);
	/* A function to be used to write PMEVCNTR<n>_EL0 */
	void		(*write_cntr)(int idx, unsigned long val);
	/* A function to be used to read PMEVTYPER<n>_EL0 */
	unsigned long	(*read_typer)(int idx);
	/* A function to be used to write PMEVTYPER<n>_EL0 */
	void		(*write_typer)(int idx, unsigned long val);
};
159 struct pmc_accessor pmc_accessors
[] = {
160 /* test with all direct accesses */
161 { read_pmevcntrn
, write_pmevcntrn
, read_pmevtypern
, write_pmevtypern
},
162 /* test with all indirect accesses */
163 { read_sel_evcntr
, write_sel_evcntr
, read_sel_evtyper
, write_sel_evtyper
},
164 /* read with direct accesses, and write with indirect accesses */
165 { read_pmevcntrn
, write_sel_evcntr
, read_pmevtypern
, write_sel_evtyper
},
166 /* read with indirect accesses, and write with direct accesses */
167 { read_sel_evcntr
, write_pmevcntrn
, read_sel_evtyper
, write_pmevtypern
},
/*
 * Convert a pointer of pmc_accessor to an index in pmc_accessors[],
 * assuming that the pointer is one of the entries in pmc_accessors[].
 */
#define PMC_ACC_TO_IDX(acc)	(acc - &pmc_accessors[0])
/*
 * Assert that the @mask bits in @regname are set (when @set_expected)
 * or clear (otherwise). Without the if/else the two asserts would be
 * contradictory, so exactly one branch must run per invocation.
 */
#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected)		 \
{									 \
	uint64_t _tval = read_sysreg(regname);				 \
									 \
	if (set_expected)						 \
		__GUEST_ASSERT((_tval & mask),				 \
				"tval: 0x%lx; mask: 0x%lx; set_expected: %u", \
				_tval, mask, set_expected);		 \
	else								 \
		__GUEST_ASSERT(!(_tval & mask),				 \
				"tval: 0x%lx; mask: 0x%lx; set_expected: %u", \
				_tval, mask, set_expected);		 \
}
191 * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
192 * are set or cleared as specified in @set_expected.
194 static void check_bitmap_pmu_regs(uint64_t mask
, bool set_expected
)
196 GUEST_ASSERT_BITMAP_REG(pmcntenset_el0
, mask
, set_expected
);
197 GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0
, mask
, set_expected
);
198 GUEST_ASSERT_BITMAP_REG(pmintenset_el1
, mask
, set_expected
);
199 GUEST_ASSERT_BITMAP_REG(pmintenclr_el1
, mask
, set_expected
);
200 GUEST_ASSERT_BITMAP_REG(pmovsset_el0
, mask
, set_expected
);
201 GUEST_ASSERT_BITMAP_REG(pmovsclr_el0
, mask
, set_expected
);
205 * Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding
206 * to the specified counter (@pmc_idx) can be read/written as expected.
207 * When @set_op is true, it tries to set the bit for the counter in
208 * those registers by writing the SET registers (the bit won't be set
209 * if the counter is not implemented though).
210 * Otherwise, it tries to clear the bits in the registers by writing
212 * Then, it checks if the values indicated in the registers are as expected.
214 static void test_bitmap_pmu_regs(int pmc_idx
, bool set_op
)
216 uint64_t pmcr_n
, test_bit
= BIT(pmc_idx
);
217 bool set_expected
= false;
220 write_sysreg(test_bit
, pmcntenset_el0
);
221 write_sysreg(test_bit
, pmintenset_el1
);
222 write_sysreg(test_bit
, pmovsset_el0
);
224 /* The bit will be set only if the counter is implemented */
225 pmcr_n
= get_pmcr_n(read_sysreg(pmcr_el0
));
226 set_expected
= (pmc_idx
< pmcr_n
) ? true : false;
228 write_sysreg(test_bit
, pmcntenclr_el0
);
229 write_sysreg(test_bit
, pmintenclr_el1
);
230 write_sysreg(test_bit
, pmovsclr_el0
);
232 check_bitmap_pmu_regs(test_bit
, set_expected
);
236 * Tests for reading/writing registers for the (implemented) event counter
237 * specified by @pmc_idx.
239 static void test_access_pmc_regs(struct pmc_accessor
*acc
, int pmc_idx
)
241 uint64_t write_data
, read_data
;
243 /* Disable all PMCs and reset all PMCs to zero. */
247 * Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR}_EL1.
250 /* Make sure that the bit in those registers are set to 0 */
251 test_bitmap_pmu_regs(pmc_idx
, false);
252 /* Test if setting the bit in those registers works */
253 test_bitmap_pmu_regs(pmc_idx
, true);
254 /* Test if clearing the bit in those registers works */
255 test_bitmap_pmu_regs(pmc_idx
, false);
258 * Tests for reading/writing the event type register.
262 * Set the event type register to an arbitrary value just for testing
263 * of reading/writing the register.
264 * Arm ARM says that for the event from 0x0000 to 0x003F,
265 * the value indicated in the PMEVTYPER<n>_EL0.evtCount field is
266 * the value written to the field even when the specified event
269 write_data
= (ARMV8_PMU_EXCLUDE_EL1
| ARMV8_PMUV3_PERFCTR_INST_RETIRED
);
270 acc
->write_typer(pmc_idx
, write_data
);
271 read_data
= acc
->read_typer(pmc_idx
);
272 __GUEST_ASSERT(read_data
== write_data
,
273 "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
274 pmc_idx
, PMC_ACC_TO_IDX(acc
), read_data
, write_data
);
277 * Tests for reading/writing the event count register.
280 read_data
= acc
->read_cntr(pmc_idx
);
282 /* The count value must be 0, as it is disabled and reset */
283 __GUEST_ASSERT(read_data
== 0,
284 "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx",
285 pmc_idx
, PMC_ACC_TO_IDX(acc
), read_data
);
287 write_data
= read_data
+ pmc_idx
+ 0x12345;
288 acc
->write_cntr(pmc_idx
, write_data
);
289 read_data
= acc
->read_cntr(pmc_idx
);
290 __GUEST_ASSERT(read_data
== write_data
,
291 "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
292 pmc_idx
, PMC_ACC_TO_IDX(acc
), read_data
, write_data
);
295 #define INVALID_EC (-1ul)
296 uint64_t expected_ec
= INVALID_EC
;
298 static void guest_sync_handler(struct ex_regs
*regs
)
302 esr
= read_sysreg(esr_el1
);
303 ec
= ESR_ELx_EC(esr
);
305 __GUEST_ASSERT(expected_ec
== ec
,
306 "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx",
307 regs
->pc
, esr
, ec
, expected_ec
);
309 /* skip the trapping instruction */
312 /* Use INVALID_EC to indicate an exception occurred */
313 expected_ec
= INVALID_EC
;
/*
 * Run the given operation that should trigger an exception with the
 * given exception class. The exception handler (guest_sync_handler)
 * will reset expected_ec to INVALID_EC, and skip the instruction
 * that trapped.
 */
#define TEST_EXCEPTION(ec, ops)				\
({							\
	GUEST_ASSERT(ec != INVALID_EC);			\
	WRITE_ONCE(expected_ec, ec);			\
	dsb(ish);					\
	ops;						\
	GUEST_ASSERT(expected_ec == INVALID_EC);	\
})
332 * Tests for reading/writing registers for the unimplemented event counter
333 * specified by @pmc_idx (>= PMCR_EL0.N).
335 static void test_access_invalid_pmc_regs(struct pmc_accessor
*acc
, int pmc_idx
)
338 * Reading/writing the event count/type registers should cause
339 * an UNDEFINED exception.
341 TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN
, acc
->read_cntr(pmc_idx
));
342 TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN
, acc
->write_cntr(pmc_idx
, 0));
343 TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN
, acc
->read_typer(pmc_idx
));
344 TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN
, acc
->write_typer(pmc_idx
, 0));
346 * The bit corresponding to the (unimplemented) counter in
347 * {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ.
349 test_bitmap_pmu_regs(pmc_idx
, 1);
350 test_bitmap_pmu_regs(pmc_idx
, 0);
354 * The guest is configured with PMUv3 with @expected_pmcr_n number of
356 * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and
357 * if reading/writing PMU registers for implemented or unimplemented
358 * counters works as expected.
360 static void guest_code(uint64_t expected_pmcr_n
)
362 uint64_t pmcr
, pmcr_n
, unimp_mask
;
365 __GUEST_ASSERT(expected_pmcr_n
<= ARMV8_PMU_MAX_GENERAL_COUNTERS
,
366 "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%x",
367 expected_pmcr_n
, ARMV8_PMU_MAX_GENERAL_COUNTERS
);
369 pmcr
= read_sysreg(pmcr_el0
);
370 pmcr_n
= get_pmcr_n(pmcr
);
372 /* Make sure that PMCR_EL0.N indicates the value userspace set */
373 __GUEST_ASSERT(pmcr_n
== expected_pmcr_n
,
374 "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
375 expected_pmcr_n
, pmcr_n
);
378 * Make sure that (RAZ) bits corresponding to unimplemented event
379 * counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset
381 * (NOTE: bits for implemented event counters are reset to UNKNOWN)
383 unimp_mask
= GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS
- 1, pmcr_n
);
384 check_bitmap_pmu_regs(unimp_mask
, false);
387 * Tests for reading/writing PMU registers for implemented counters.
388 * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
390 for (i
= 0; i
< ARRAY_SIZE(pmc_accessors
); i
++) {
391 for (pmc
= 0; pmc
< pmcr_n
; pmc
++)
392 test_access_pmc_regs(&pmc_accessors
[i
], pmc
);
396 * Tests for reading/writing PMU registers for unimplemented counters.
397 * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
399 for (i
= 0; i
< ARRAY_SIZE(pmc_accessors
); i
++) {
400 for (pmc
= pmcr_n
; pmc
< ARMV8_PMU_MAX_GENERAL_COUNTERS
; pmc
++)
401 test_access_invalid_pmc_regs(&pmc_accessors
[i
], pmc
);
407 /* Create a VM that has one vCPU with PMUv3 configured. */
408 static void create_vpmu_vm(void *guest_code
)
410 struct kvm_vcpu_init init
;
412 uint64_t dfr0
, irq
= 23;
413 struct kvm_device_attr irq_attr
= {
414 .group
= KVM_ARM_VCPU_PMU_V3_CTRL
,
415 .attr
= KVM_ARM_VCPU_PMU_V3_IRQ
,
416 .addr
= (uint64_t)&irq
,
418 struct kvm_device_attr init_attr
= {
419 .group
= KVM_ARM_VCPU_PMU_V3_CTRL
,
420 .attr
= KVM_ARM_VCPU_PMU_V3_INIT
,
423 /* The test creates the vpmu_vm multiple times. Ensure a clean state */
424 memset(&vpmu_vm
, 0, sizeof(vpmu_vm
));
426 vpmu_vm
.vm
= vm_create(1);
427 vm_init_descriptor_tables(vpmu_vm
.vm
);
428 for (ec
= 0; ec
< ESR_ELx_EC_MAX
+ 1; ec
++) {
429 vm_install_sync_handler(vpmu_vm
.vm
, VECTOR_SYNC_CURRENT
, ec
,
433 /* Create vCPU with PMUv3 */
434 vm_ioctl(vpmu_vm
.vm
, KVM_ARM_PREFERRED_TARGET
, &init
);
435 init
.features
[0] |= (1 << KVM_ARM_VCPU_PMU_V3
);
436 vpmu_vm
.vcpu
= aarch64_vcpu_add(vpmu_vm
.vm
, 0, &init
, guest_code
);
437 vcpu_init_descriptor_tables(vpmu_vm
.vcpu
);
438 vpmu_vm
.gic_fd
= vgic_v3_setup(vpmu_vm
.vm
, 1, 64);
439 __TEST_REQUIRE(vpmu_vm
.gic_fd
>= 0,
440 "Failed to create vgic-v3, skipping");
442 /* Make sure that PMUv3 support is indicated in the ID register */
443 vcpu_get_reg(vpmu_vm
.vcpu
,
444 KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1
), &dfr0
);
445 pmuver
= FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer
), dfr0
);
446 TEST_ASSERT(pmuver
!= ID_AA64DFR0_EL1_PMUVer_IMP_DEF
&&
447 pmuver
>= ID_AA64DFR0_EL1_PMUVer_IMP
,
448 "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver
);
450 /* Initialize vPMU */
451 vcpu_ioctl(vpmu_vm
.vcpu
, KVM_SET_DEVICE_ATTR
, &irq_attr
);
452 vcpu_ioctl(vpmu_vm
.vcpu
, KVM_SET_DEVICE_ATTR
, &init_attr
);
455 static void destroy_vpmu_vm(void)
457 close(vpmu_vm
.gic_fd
);
458 kvm_vm_free(vpmu_vm
.vm
);
461 static void run_vcpu(struct kvm_vcpu
*vcpu
, uint64_t pmcr_n
)
465 vcpu_args_set(vcpu
, 1, pmcr_n
);
467 switch (get_ucall(vcpu
, &uc
)) {
469 REPORT_GUEST_ASSERT(uc
);
474 TEST_FAIL("Unknown ucall %lu", uc
.cmd
);
479 static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n
, bool expect_fail
)
481 struct kvm_vcpu
*vcpu
;
482 uint64_t pmcr
, pmcr_orig
;
484 create_vpmu_vm(guest_code
);
487 vcpu_get_reg(vcpu
, KVM_ARM64_SYS_REG(SYS_PMCR_EL0
), &pmcr_orig
);
491 * Setting a larger value of PMCR.N should not modify the field, and
494 set_pmcr_n(&pmcr
, pmcr_n
);
495 vcpu_set_reg(vcpu
, KVM_ARM64_SYS_REG(SYS_PMCR_EL0
), pmcr
);
496 vcpu_get_reg(vcpu
, KVM_ARM64_SYS_REG(SYS_PMCR_EL0
), &pmcr
);
499 TEST_ASSERT(pmcr_orig
== pmcr
,
500 "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx",
503 TEST_ASSERT(pmcr_n
== get_pmcr_n(pmcr
),
504 "Failed to update PMCR.N to %lu (received: %lu)",
505 pmcr_n
, get_pmcr_n(pmcr
));
509 * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
512 static void run_access_test(uint64_t pmcr_n
)
515 struct kvm_vcpu
*vcpu
;
516 struct kvm_vcpu_init init
;
518 pr_debug("Test with pmcr_n %lu\n", pmcr_n
);
520 test_create_vpmu_vm_with_pmcr_n(pmcr_n
, false);
523 /* Save the initial sp to restore them later to run the guest again */
524 vcpu_get_reg(vcpu
, ARM64_CORE_REG(sp_el1
), &sp
);
526 run_vcpu(vcpu
, pmcr_n
);
529 * Reset and re-initialize the vCPU, and run the guest code again to
530 * check if PMCR_EL0.N is preserved.
532 vm_ioctl(vpmu_vm
.vm
, KVM_ARM_PREFERRED_TARGET
, &init
);
533 init
.features
[0] |= (1 << KVM_ARM_VCPU_PMU_V3
);
534 aarch64_vcpu_setup(vcpu
, &init
);
535 vcpu_init_descriptor_tables(vcpu
);
536 vcpu_set_reg(vcpu
, ARM64_CORE_REG(sp_el1
), sp
);
537 vcpu_set_reg(vcpu
, ARM64_CORE_REG(regs
.pc
), (uint64_t)guest_code
);
539 run_vcpu(vcpu
, pmcr_n
);
544 static struct pmreg_sets validity_check_reg_sets
[] = {
545 PMREG_SET(SYS_PMCNTENSET_EL0
, SYS_PMCNTENCLR_EL0
),
546 PMREG_SET(SYS_PMINTENSET_EL1
, SYS_PMINTENCLR_EL1
),
547 PMREG_SET(SYS_PMOVSSET_EL0
, SYS_PMOVSCLR_EL0
),
551 * Create a VM, and check if KVM handles the userspace accesses of
552 * the PMU register sets in @validity_check_reg_sets[] correctly.
554 static void run_pmregs_validity_test(uint64_t pmcr_n
)
557 struct kvm_vcpu
*vcpu
;
558 uint64_t set_reg_id
, clr_reg_id
, reg_val
;
559 uint64_t valid_counters_mask
, max_counters_mask
;
561 test_create_vpmu_vm_with_pmcr_n(pmcr_n
, false);
564 valid_counters_mask
= get_counters_mask(pmcr_n
);
565 max_counters_mask
= get_counters_mask(ARMV8_PMU_MAX_COUNTERS
);
567 for (i
= 0; i
< ARRAY_SIZE(validity_check_reg_sets
); i
++) {
568 set_reg_id
= validity_check_reg_sets
[i
].set_reg_id
;
569 clr_reg_id
= validity_check_reg_sets
[i
].clr_reg_id
;
572 * Test if the 'set' and 'clr' variants of the registers
573 * are initialized based on the number of valid counters.
575 vcpu_get_reg(vcpu
, KVM_ARM64_SYS_REG(set_reg_id
), ®_val
);
576 TEST_ASSERT((reg_val
& (~valid_counters_mask
)) == 0,
577 "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
578 KVM_ARM64_SYS_REG(set_reg_id
), reg_val
);
580 vcpu_get_reg(vcpu
, KVM_ARM64_SYS_REG(clr_reg_id
), ®_val
);
581 TEST_ASSERT((reg_val
& (~valid_counters_mask
)) == 0,
582 "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
583 KVM_ARM64_SYS_REG(clr_reg_id
), reg_val
);
586 * Using the 'set' variant, force-set the register to the
587 * max number of possible counters and test if KVM discards
588 * the bits for unimplemented counters as it should.
590 vcpu_set_reg(vcpu
, KVM_ARM64_SYS_REG(set_reg_id
), max_counters_mask
);
592 vcpu_get_reg(vcpu
, KVM_ARM64_SYS_REG(set_reg_id
), ®_val
);
593 TEST_ASSERT((reg_val
& (~valid_counters_mask
)) == 0,
594 "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
595 KVM_ARM64_SYS_REG(set_reg_id
), reg_val
);
597 vcpu_get_reg(vcpu
, KVM_ARM64_SYS_REG(clr_reg_id
), ®_val
);
598 TEST_ASSERT((reg_val
& (~valid_counters_mask
)) == 0,
599 "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
600 KVM_ARM64_SYS_REG(clr_reg_id
), reg_val
);
607 * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
608 * the vCPU to @pmcr_n, which is larger than the host value.
609 * The attempt should fail as @pmcr_n is too big to set for the vCPU.
611 static void run_error_test(uint64_t pmcr_n
)
613 pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n
);
615 test_create_vpmu_vm_with_pmcr_n(pmcr_n
, true);
620 * Return the default number of implemented PMU event counters excluding
621 * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
623 static uint64_t get_pmcr_n_limit(void)
627 create_vpmu_vm(guest_code
);
628 vcpu_get_reg(vpmu_vm
.vcpu
, KVM_ARM64_SYS_REG(SYS_PMCR_EL0
), &pmcr
);
630 return get_pmcr_n(pmcr
);
637 TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3
));
639 pmcr_n
= get_pmcr_n_limit();
640 for (i
= 0; i
<= pmcr_n
; i
++) {
642 run_pmregs_validity_test(i
);
645 for (i
= pmcr_n
+ 1; i
< ARMV8_PMU_MAX_COUNTERS
; i
++)