/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define KVM_ARMV8_PMU_MAX_COUNTERS	32

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};
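
/*
 * Illustrative sketch, not part of the original header: each vCPU carries one
 * struct kvm_pmu in vcpu->arch.pmu, and architected counter 'idx' is backed
 * by vcpu->arch.pmu.pmc[idx], which owns the corresponding perf_event. The
 * implementation is assumed to resolve counters with a helper along these
 * lines (name hypothetical here):
 *
 *	static struct kvm_pmc *vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int idx)
 *	{
 *		return &vcpu->arch.pmu.pmc[idx];
 *	}
 */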

struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}
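
/*
 * Illustrative sketch, an assumption rather than text from the original
 * header: kvm_arm_support_pmu_v3() compiles down to a static branch, so hot
 * paths can guard PMU work almost for free when no host PMU driver has
 * registered itself:
 *
 *	if (!kvm_arm_support_pmu_v3())
 *		return;
 *	... touch host/guest PMU state ...
 */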

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);
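
/*
 * Illustrative sketch, assumed usage rather than part of the original header:
 * userspace configures the emulated PMU through the vcpu device-attr
 * interface, which lands in the kvm_arm_pmu_v3_*_attr() handlers above, e.g.
 * wiring up the overflow interrupt before the first KVM_RUN:
 *
 *	int irq = 23;	// example PPI number
 *	struct kvm_device_attr attr = {
 *		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr  = (__u64)&irq,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */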

#define kvm_vcpu_has_pmu(vcpu)					\
	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
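
/*
 * Illustrative sketch, an assumption rather than text from the original
 * header: emulation paths are expected to check the per-vCPU PMU feature
 * before touching emulated counter state, e.g.:
 *
 *	if (!kvm_vcpu_has_pmu(vcpu))
 *		return;
 *	kvm_pmu_handle_pmcr(vcpu, val);
 */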

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
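
/*
 * Illustrative sketch, assumed call site rather than part of the original
 * header: the vcpu run loop snapshots the host/guest event split only after
 * interrupts are off, so the copy cannot race with a PMU interrupt:
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	... enter the guest ...
 */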

u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	return 0;
}

#endif

#endif