/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#ifndef __KVM_VCPU_RISCV_PMU_H
#define __KVM_VCPU_RISCV_PMU_H

#include <linux/perf/riscv_pmu.h>
#include <asm/kvm_vcpu_insn.h>

#ifdef CONFIG_RISCV_PMU_SBI
#define RISCV_KVM_MAX_FW_CTRS	32
#define RISCV_KVM_MAX_HW_CTRS	32
#define RISCV_KVM_MAX_COUNTERS	(RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
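/* Counter indices must fit in the 64-bit counter masks used by the SBI PMU calls */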
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);

struct kvm_fw_event {
	/* Current value of the event */
	u64 value;

	/* Event monitoring status */
	bool started;
};

/* Per virtual pmu counter data */
struct kvm_pmc {
	u8 idx;
	struct perf_event *perf_event;
	u64 counter_val;
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
	struct kvm_vcpu *vcpu;
};

/* PMU data structure per vcpu */
struct kvm_pmu {
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
	/* Number of the virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of the virtual hardware counters available */
	int num_hw_ctrs;
	/* A flag to indicate that pmu initialization is done */
	bool init_done;
	/* Bit map of all the virtual counters used */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
	/* Bit map of all the virtual counters overflown */
	DECLARE_BITMAP(pmc_overflown, RISCV_KVM_MAX_COUNTERS);
	/* The address of the counter snapshot area (guest physical address) */
	gpa_t snapshot_addr;
	/* The actual data of the snapshot */
	struct riscv_pmu_snapshot_data *sdata;
};
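
/* Convert between a vcpu and its PMU context */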
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu_context))

#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLEH,	.count = 32,	.func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE,	.count = 32,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE,	.count = 32,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#endif
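
/*
 * Increment the virtual firmware counter for @fid, and service guest reads
 * of the hpmcounter CSR range, respectively.
 */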
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);
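
/*
 * Handlers for the SBI PMU extension calls; each reports its result back to
 * the guest through *retdata.
 */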
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
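
/* Read a firmware counter; the _hi variant returns the upper 32 bits on RV32 */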
int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				   struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
				      struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
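/* Set up the SBI PMU snapshot shared memory area for this vcpu */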
int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
					  unsigned long saddr_high, unsigned long flags,
					  struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);

#else
struct kvm_pmu {
};

/* Without SBI PMU support, cycle/instret reads are emulated as zero */
static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
						 unsigned long *val, unsigned long new_val,
						 unsigned long wr_mask)
{
	if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
		*val = 0;
		return KVM_INSN_CONTINUE_NEXT_SEPC;
	} else {
		return KVM_INSN_ILLEGAL_TRAP;
	}
}

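/* The legacy handler covers the cycle/time/instret CSR range */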
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },

static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}

static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
#endif /* CONFIG_RISCV_PMU_SBI */
#endif /* !__KVM_VCPU_RISCV_PMU_H */