arch/riscv/kvm/vcpu_sbi_sta.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Ventana Micro Systems Inc.
 */

#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/bug.h>
#include <asm/current.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/page.h>
#include <asm/sbi.h>
#include <asm/uaccess.h>
void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sta.shmem = INVALID_GPA;
	vcpu->arch.sta.last_steal = 0;
}
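
/*
 * Update the steal-time accounting in the vCPU's STA shared memory.
 *
 * The sequence field is used like a seqcount: it is bumped to an odd
 * value before the steal field is updated and back to an even value
 * afterwards, so the guest can detect and retry torn reads. A guest-side
 * reader might consume the fields with a retry loop along these lines
 * (a sketch only; it assumes a guest-visible mapping `sta` of struct
 * sbi_sta_struct and is not code from this file):
 *
 *	do {
 *		seq = le32_to_cpu(READ_ONCE(sta->sequence));
 *		steal = le64_to_cpu(READ_ONCE(sta->steal));
 *	} while ((seq & 1) ||
 *		 seq != le32_to_cpu(READ_ONCE(sta->sequence)));
 */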
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
	gpa_t shmem = vcpu->arch.sta.shmem;
	u64 last_steal = vcpu->arch.sta.last_steal;
	__le32 __user *sequence_ptr;
	__le64 __user *steal_ptr;
	__le32 sequence_le;
	__le64 steal_le;
	u32 sequence;
	u64 steal;
	unsigned long hva;
	gfn_t gfn;

	if (shmem == INVALID_GPA)
		return;

	/*
	 * shmem is 64-byte aligned (see the enforcement in
	 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
	 * is 64 bytes, so we know all its offsets are in the same page.
	 */
	gfn = shmem >> PAGE_SHIFT;
	hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

	if (WARN_ON(kvm_is_error_hva(hva))) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return;
	}

	sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
			offsetof(struct sbi_sta_struct, sequence));
	steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
			offsetof(struct sbi_sta_struct, steal));

	if (WARN_ON(get_user(sequence_le, sequence_ptr)))
		return;

	sequence = le32_to_cpu(sequence_le);
	sequence += 1;

	/* Odd sequence: steal update in progress. */
	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
		return;

	if (!WARN_ON(get_user(steal_le, steal_ptr))) {
		steal = le64_to_cpu(steal_le);
		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.sta.last_steal - last_steal;
		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
	}

	/* Even sequence: update complete. */
	sequence += 1;
	WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));

	kvm_vcpu_mark_page_dirty(vcpu, gfn);
}
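
/*
 * Handle SBI_EXT_STA_STEAL_TIME_SET_SHMEM: register (or disable) the
 * shared memory region used for steal-time reporting.
 *
 * Per the call's register usage below, a0/a1 carry the low/high halves
 * of the shared memory physical address and a2 carries flags, which
 * must be zero. Passing SBI_SHMEM_DISABLE in both address halves
 * disables reporting. The address must be 64-byte aligned so that
 * struct sbi_sta_struct (64 bytes) never straddles a page boundary.
 */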
static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long shmem_phys_lo = cp->a0;
	unsigned long shmem_phys_hi = cp->a1;
	u32 flags = cp->a2;
	struct sbi_sta_struct zero_sta = {0};
	unsigned long hva;
	bool writable;
	gpa_t shmem;
	int ret;

	if (flags != 0)
		return SBI_ERR_INVALID_PARAM;

	if (shmem_phys_lo == SBI_SHMEM_DISABLE &&
	    shmem_phys_hi == SBI_SHMEM_DISABLE) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return 0;
	}

	if (shmem_phys_lo & (SZ_64 - 1))
		return SBI_ERR_INVALID_PARAM;

	shmem = shmem_phys_lo;

	if (shmem_phys_hi != 0) {
		if (IS_ENABLED(CONFIG_32BIT))
			shmem |= ((gpa_t)shmem_phys_hi << 32);
		else
			return SBI_ERR_INVALID_ADDRESS;
	}

	hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
	if (kvm_is_error_hva(hva) || !writable)
		return SBI_ERR_INVALID_ADDRESS;

	ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
	if (ret)
		return SBI_ERR_FAILURE;

	vcpu->arch.sta.shmem = shmem;
	vcpu->arch.sta.last_steal = current->sched_info.run_delay;

	return 0;
}
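
/*
 * Top-level dispatcher for the STA extension: the SBI function ID
 * arrives in a6, and the SBI error value is returned to the guest
 * through retdata.
 */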
static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	int ret;

	switch (funcid) {
	case SBI_EXT_STA_STEAL_TIME_SET_SHMEM:
		ret = kvm_sbi_sta_steal_time_set_shmem(vcpu);
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
		break;
	}

	retdata->err_val = ret;

	return 0;
}
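
/*
 * Only advertise the extension when scheduler delay accounting is
 * active, since the steal value above is derived from
 * sched_info.run_delay.
 */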
static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
{
	return !!sched_info_on();
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
	.extid_start = SBI_EXT_STA,
	.extid_end = SBI_EXT_STA,
	.handler = kvm_sbi_ext_sta_handler,
	.probe = kvm_sbi_ext_sta_probe,
};
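
/*
 * The shmem address is also exposed to the VMM as the ONE_REG
 * pseudo-registers shmem_lo/shmem_hi below, so the STA state can be
 * saved and restored, e.g. across migration.
 */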
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
				   unsigned long reg_num,
				   unsigned long *reg_val)
{
	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		*reg_val = (unsigned long)vcpu->arch.sta.shmem;
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT))
			*reg_val = upper_32_bits(vcpu->arch.sta.shmem);
		else
			*reg_val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
				   unsigned long reg_num,
				   unsigned long reg_val)
{
	switch (reg_num) {
	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = reg_val;
			vcpu->arch.sta.shmem |= hi << 32;
		} else {
			vcpu->arch.sta.shmem = reg_val;
		}
		break;
	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
		if (IS_ENABLED(CONFIG_32BIT)) {
			gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);

			vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
			vcpu->arch.sta.shmem |= lo;
		} else if (reg_val != 0) {
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}