// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}
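/*
 * Note: kvm_pmc_to_vcpu() relies purely on structure layout. The pmc pointer
 * is first rewound to pmc[0] of its owning kvm_pmu, and container_of() then
 * walks kvm_pmu -> kvm_vcpu_arch -> kvm_vcpu. This works only because
 * struct kvm_pmu is embedded in kvm_vcpu_arch, which is itself embedded in
 * struct kvm_vcpu.
 */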
/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}
/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
	return select_idx & 0x1;
}
/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;

	return pmc;
}
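/*
 * Example: if counters 2 and 3 form a chained pair, counter 2 is the
 * canonical counter and owns the perf event; operations on counter 3 are
 * redirected to counter 2 by kvm_pmu_get_canonical_pmc().
 */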
/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 eventsel, reg;

	select_idx |= 0x1;

	if (select_idx == ARMV8_PMU_CYCLE_IDX)
		return false;

	reg = PMEVTYPER0_EL0 + select_idx;
	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;

	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}
/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
					  struct kvm_pmc *pmc)
{
	u64 counter, counter_high, reg, enabled, running;

	if (kvm_pmu_pmc_is_chained(pmc)) {
		pmc = kvm_pmu_get_canonical_pmc(pmc);
		reg = PMEVCNTR0_EL0 + pmc->idx;

		counter = __vcpu_sys_reg(vcpu, reg);
		counter_high = __vcpu_sys_reg(vcpu, reg + 1);

		counter = lower_32_bits(counter) | (counter_high << 32);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		counter = __vcpu_sys_reg(vcpu, reg);
	}

	/*
	 * The real counter value is equal to the value of counter register plus
	 * the value perf event counts.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter;
}
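/*
 * For a chained pair the two 32-bit shadow registers are combined into a
 * single 64-bit value (even counter = low word, odd counter = high word)
 * before the live perf delta is added, so callers always see one logical
 * counter value.
 */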
/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(select_idx))
		counter = upper_32_bits(counter);
	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
		counter = lower_32_bits(counter);

	return counter;
}
/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu,
								      select_idx);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}
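/*
 * kvm_pmu_set_counter_value() stores a signed delta rather than the raw
 * value: since kvm_pmu_get_counter_value() folds in the live perf count,
 * adding (val - current) to the shadow register makes a subsequent read
 * return exactly val, and the recreated perf event picks up the matching
 * sample period.
 */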
/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}
/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg, val;

	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (!pmc->perf_event)
		return;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
		reg = PMCCNTR_EL0;
		val = counter;
	} else {
		reg = PMEVCNTR0_EL0 + pmc->idx;
		val = lower_32_bits(counter);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	if (kvm_pmu_pmc_is_chained(pmc))
		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

	kvm_pmu_release_perf_event(pmc);
}
/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}
/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}
/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
}
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
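/*
 * Example: with PMCR_EL0.N == 6 implemented event counters, the valid mask
 * is GENMASK(5, 0) | BIT(ARMV8_PMU_CYCLE_IDX), i.e. counters 0-5 plus the
 * cycle counter.
 */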
/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/*
		 * For high counters of chained events we must recreate the
		 * perf event with the long (64bit) attribute set.
		 */
		if (kvm_pmu_pmc_is_chained(pmc) &&
		    kvm_pmu_idx_is_high_counter(i)) {
			kvm_pmu_create_perf_event(vcpu, i);
			continue;
		}

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
	}
}
/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/*
		 * For high counters of chained events we must recreate the
		 * perf event with the long (64bit) attribute unset.
		 */
		if (kvm_pmu_pmc_is_chained(pmc) &&
		    kvm_pmu_idx_is_high_counter(i)) {
			kvm_pmu_create_perf_event(vcpu, i);
			continue;
		}

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}
/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU overflow bit for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}
/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}
/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}
/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = -(local64_read(&perf_event->count));

	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
		period &= GENMASK(31, 0);

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}
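/*
 * Worked example: for a 32-bit counter whose accumulated perf count reads
 * 0xffffff00 when the overflow handler runs, the new sample period is
 * (-0xffffff00) & GENMASK(31, 0) == 0x100, so the event is restarted to
 * fire again after 256 more increments, i.e. at the next architectural wrap
 * of the emulated counter.
 */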
/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
		    && (enable & BIT(i))) {
			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		}
	}
}
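/*
 * Example: a software-increment counter sitting at 0xffffffff wraps to 0 on
 * the next PMSWINC write, and only then is the corresponding PMOVSSET_EL0
 * bit set to signal the overflow.
 */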
/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter_mask(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}
/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

	/*
	 * For chained counters the event type and filtering attributes are
	 * obtained from the low/even counter. We also use this counter to
	 * determine if the event is enabled/disabled.
	 */
	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    pmc->idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
		/*
		 * The initial sample period (overflow count) of an event. For
		 * chained counters we only support overflow interrupts on the
		 * high counter.
		 */
		attr.sample_period = (-counter) & GENMASK(63, 0);
		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc + 1);
	} else {
		/* The initial sample period (overflow count) of an event. */
		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
			attr.sample_period = (-counter) & GENMASK(63, 0);
		else
			attr.sample_period = (-counter) & GENMASK(31, 0);

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc);
	}

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}
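/*
 * The sample period is programmed as the negated counter value, i.e. the
 * number of events remaining until the emulated counter wraps. For a chained
 * pair the overflow handler context is pmc + 1 (the odd/high counter), since
 * overflow interrupts are only supported on the high counter, while the
 * canonical low counter still owns the perf event itself.
 */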
/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * event type register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
		/*
		 * During promotion from !chained to chained we must ensure
		 * the adjacent counter is stopped and its event destroyed
		 */
		if (!kvm_pmu_pmc_is_chained(pmc))
			kvm_pmu_stop_counter(vcpu, pmc);

		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
	} else {
		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
	}
}
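/*
 * The chained bitmap holds one bit per even/odd counter pair (hence the
 * pmc->idx >> 1 indexing), so marking either counter of a pair as chained
 * affects both of them.
 */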
/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

	__vcpu_sys_reg(vcpu, reg) = event_type;

	kvm_pmu_update_pmc_chained(vcpu, select_idx);
	kvm_pmu_create_perf_event(vcpu, select_idx);
}
bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This ensures the presence of a
	 * physical PMU and that CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
		return -ENXIO;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	vcpu->arch.pmu.created = true;
	return 0;
}
/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}
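/*
 * Illustrative userspace sequence (a sketch, not part of this file): the VMM
 * sets the overflow interrupt first and only then issues the INIT attribute,
 * both via KVM_SET_DEVICE_ATTR on the vcpu fd with the
 * KVM_ARM_VCPU_PMU_V3_CTRL group. The fd name and the chosen PPI number are
 * assumptions for the example:
 *
 *	int irq = 23;	// PMU overflow PPI picked by the VMM (assumption)
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (__u64)(unsigned long)&irq,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *	attr.addr = 0;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */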
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}