/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

#ifdef CONFIG_KVM_ARM_PMU
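
/*
 * State for one emulated counter: the backing host perf_event is NULL
 * while the guest has no event programmed on this counter.
 */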
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};
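
/*
 * Per-vcpu PMU state. Each bit in 'chained' covers an even/odd counter
 * pair and is set when the guest uses that pair as a single 64-bit
 * chained counter.
 */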
struct kvm_pmu {
	int irq_num;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
	bool ready;
	bool created;
	bool irq_level;
};

#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
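
/*
 * PMUv3 emulation interface: the accessors below are called from the PMU
 * system register handlers, while flush/sync maintain the overflow
 * interrupt level around guest entry and exit.
 */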
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
bool kvm_arm_support_pmu_v3(void);
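
/*
 * Backing for the KVM_ARM_VCPU_PMU_V3_CTRL vcpu device attribute group,
 * reached via the KVM_{SET,GET,HAS}_DEVICE_ATTR ioctls on the vcpu fd.
 */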
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

#else
struct kvm_pmu {
};
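
/*
 * Stubs for builds without CONFIG_KVM_ARM_PMU: the PMU reports itself as
 * absent and the attribute accessors fail, so callers need no #ifdefs.
 */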
#define kvm_arm_pmu_v3_ready(v)		(false)
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_KVM_ARM_PMU */
#endif /* __ASM_ARM_KVM_PMU_H */