arch/arm64/kvm/regmap.c (Linux 4.18.10)

/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/emulate.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/ptrace.h>

#define VCPU_NR_MODES 6
#define REG_OFFSET(_reg) \
        (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))

#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))

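/*
 * Each row below maps the AArch32 registers r0-r15 of one CPU mode to
 * their storage slots in struct user_pt_regs (the vcpu's gp_regs.regs),
 * expressed as indices in unsigned-long units. Modes with banked
 * registers (e.g. the FIQ bank of r8-r14) therefore get their own row.
 */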
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
        /* USR Registers */
        {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
                REG_OFFSET(pc)
        },

        /* FIQ Registers */
        {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7),
                REG_OFFSET(compat_r8_fiq),  /* r8 */
                REG_OFFSET(compat_r9_fiq),  /* r9 */
                REG_OFFSET(compat_r10_fiq), /* r10 */
                REG_OFFSET(compat_r11_fiq), /* r11 */
                REG_OFFSET(compat_r12_fiq), /* r12 */
                REG_OFFSET(compat_sp_fiq),  /* r13 */
                REG_OFFSET(compat_lr_fiq),  /* r14 */
                REG_OFFSET(pc)
        },

        /* IRQ Registers */
        {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12),
                REG_OFFSET(compat_sp_irq), /* r13 */
                REG_OFFSET(compat_lr_irq), /* r14 */
                REG_OFFSET(pc)
        },

        /* SVC Registers */
        {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12),
                REG_OFFSET(compat_sp_svc), /* r13 */
                REG_OFFSET(compat_lr_svc), /* r14 */
                REG_OFFSET(pc)
        },

        /* ABT Registers */
        {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12),
                REG_OFFSET(compat_sp_abt), /* r13 */
                REG_OFFSET(compat_lr_abt), /* r14 */
                REG_OFFSET(pc)
        },

        /* UND Registers */
        {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12),
                REG_OFFSET(compat_sp_und), /* r13 */
                REG_OFFSET(compat_lr_und), /* r14 */
                REG_OFFSET(pc)
        },
};

/*
 * Return a pointer to the AArch32 register @reg_num, banked according to
 * the current mode of the virtual CPU.
 */
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
        unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
        unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;

        switch (mode) {
        case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
                mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
                break;

        case COMPAT_PSR_MODE_ABT:
                mode = 4;
                break;

        case COMPAT_PSR_MODE_UND:
                mode = 5;
                break;

        case COMPAT_PSR_MODE_SYS:
                mode = 0;       /* SYS maps to USR */
                break;

        default:
                BUG();
        }

        return reg_array + vcpu_reg_offsets[mode][reg_num];
}

/*
 * Return the index of the banked SPSR for the current mode of the
 * virtual CPU.
 */
static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
{
        unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
        switch (mode) {
        case COMPAT_PSR_MODE_SVC: return KVM_SPSR_SVC;
        case COMPAT_PSR_MODE_ABT: return KVM_SPSR_ABT;
        case COMPAT_PSR_MODE_UND: return KVM_SPSR_UND;
        case COMPAT_PSR_MODE_IRQ: return KVM_SPSR_IRQ;
        case COMPAT_PSR_MODE_FIQ: return KVM_SPSR_FIQ;
        default: BUG();
        }
}

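/*
 * Read the banked SPSR for the vcpu's current AArch32 mode: from the
 * in-memory context if the vcpu's system registers are not loaded on the
 * physical CPU, otherwise straight from the corresponding hardware
 * register.
 */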
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
{
        int spsr_idx = vcpu_spsr32_mode(vcpu);

        if (!vcpu->arch.sysregs_loaded_on_cpu)
                return vcpu_gp_regs(vcpu)->spsr[spsr_idx];

        switch (spsr_idx) {
        case KVM_SPSR_SVC:
                return read_sysreg_el1(spsr);
        case KVM_SPSR_ABT:
                return read_sysreg(spsr_abt);
        case KVM_SPSR_UND:
                return read_sysreg(spsr_und);
        case KVM_SPSR_IRQ:
                return read_sysreg(spsr_irq);
        case KVM_SPSR_FIQ:
                return read_sysreg(spsr_fiq);
        default:
                BUG();
        }
}

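/*
 * Write the banked SPSR for the vcpu's current AArch32 mode, mirroring
 * vcpu_read_spsr32(): update the in-memory context, or the hardware
 * register when the vcpu's system registers are loaded on the CPU.
 */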
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
{
        int spsr_idx = vcpu_spsr32_mode(vcpu);

        if (!vcpu->arch.sysregs_loaded_on_cpu) {
                vcpu_gp_regs(vcpu)->spsr[spsr_idx] = v;
                return;
        }

        switch (spsr_idx) {
        case KVM_SPSR_SVC:
                write_sysreg_el1(v, spsr);
                break;
        case KVM_SPSR_ABT:
                write_sysreg(v, spsr_abt);
                break;
        case KVM_SPSR_UND:
                write_sysreg(v, spsr_und);
                break;
        case KVM_SPSR_IRQ:
                write_sysreg(v, spsr_irq);
                break;
        case KVM_SPSR_FIQ:
                write_sysreg(v, spsr_fiq);
                break;
        }
}