/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 *
 * Copyright (c) 2007 Intel Corporation  KVM support.
 *  Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *  Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <linux/kvm_host.h>

#include "vcpu.h"

static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
{
	VLSAPIC_XTP(v) = val;
}

#define PIB_LOW_HALF(ofst)	!(ofst & (1 << 20))
#define PIB_OFST_INTA		0x1E0000
#define PIB_OFST_XTP		0x1E0008
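
/*
 * PIB layout as used below: offsets with bit 20 clear (the "low half")
 * are IPI delivery targets, while the INTA and XTP bytes sit in the
 * upper half at the offsets defined above.
 */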

/*
 * execute write IPI op.
 */
static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
			uint64_t addr, uint64_t data)
{
	struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
	unsigned long psr;

	local_irq_save(psr);

	/* Hand the IPI to the host side of the VMM for delivery. */
	p->exit_reason = EXIT_REASON_IPI;
	p->u.ipi_data.addr.val = addr;
	p->u.ipi_data.data.val = data;
	vmm_transition(current_vcpu);

	local_irq_restore(psr);
}
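
/*
 * lsapic_write(): handle a guest store into the PIB.  Stores to the low
 * half are treated as IPI requests, single-byte stores to the XTP offset
 * update the task priority, and anything else is flagged as an error.
 */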
void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
			unsigned long length, unsigned long val)
{
	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		panic_vm(v, "Undefined write on PIB INTA\n");
		break;
	case PIB_OFST_XTP:
		if (length == 1)
			vlsapic_write_xtp(v, val);
		else
			panic_vm(v, "Undefined write on PIB XTP\n");
		break;
	default:
		if (PIB_LOW_HALF(addr)) {
			/* Lower half: IPI delivery must be an 8-byte store */
			if (length != 8)
				panic_vm(v, "Can't LHF write with size %ld!\n",
						length);
			else
				vlsapic_write_ipi(v, addr, val);
		} else {	/* Upper half */
			panic_vm(v, "IPI-UHF write %lx\n", addr);
		}
		break;
	}
}
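
/*
 * lsapic_read(): handle a guest load from the PIB.  Only single-byte
 * reads of INTA (ignored, there is no i8259) and XTP are meaningful;
 * everything else is an error.
 */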
unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
		unsigned long length)
{
	uint64_t result = 0;

	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		if (length == 1)	/* 1 byte load */
			;	/* There is no i8259, there is no INTA access */
		else
			panic_vm(v, "Undefined read on PIB INTA\n");
		break;
	case PIB_OFST_XTP:
		if (length == 1)
			result = VLSAPIC_XTP(v);
		else
			panic_vm(v, "Undefined read on PIB XTP\n");
		break;
	default:
		panic_vm(v, "Undefined addr access for lsapic!\n");
		break;
	}
	return result;
}
static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
					u16 s, int ma, int dir)
{
	unsigned long iot;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);

	local_irq_save(psr);

	/* Intercept the access for the PIB range */
	if (iot == GPFN_PIB) {
		if (dir == IOREQ_WRITE)
			lsapic_write(vcpu, src_pa, s, *dest);
		else
			*dest = lsapic_read(vcpu, src_pa, s);
		goto out;
	}

	/* Everything else becomes an ioreq handled outside the VMM */
	p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
	p->u.ioreq.addr = src_pa;
	p->u.ioreq.size = s;
	p->u.ioreq.dir = dir;
	if (dir == IOREQ_WRITE)
		p->u.ioreq.data = *dest;
	p->u.ioreq.state = STATE_IOREQ_READY;
	vmm_transition(vcpu);

	if (p->u.ioreq.state == STATE_IORESP_READY) {
		if (dir == IOREQ_READ)
			/* it's necessary to ensure zero extending */
			*dest = p->u.ioreq.data & (~0UL >> (64 - (s * 8)));
	} else
		panic_vm(vcpu, "Unhandled mmio access returned!\n");
out:
	local_irq_restore(psr);
}

/*
 * inst_type 0: integer  1: floating point
 */
#define SL_INTEGER	0	/* store/load integer */
#define SL_FLOATING	1	/* store/load floating point */
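
/*
 * emulate_io_inst(): decode the memory instruction that faulted at
 * cr_iip, work out the direction, size and (for stores) the data of the
 * access, perform it through mmio_access(), and finally advance the
 * guest instruction pointer.
 */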
void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
{
	struct kvm_pt_regs *regs;
	IA64_BUNDLE bundle;
	int slot, dir = 0;
	int inst_type = -1;
	u16 size = 0;
	u64 data, slot1a, slot1b, temp, update_reg;
	s32 imm;
	INST64 inst;

	regs = vcpu_regs(vcpu);

	if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
		/* if the code fetch fails, return and let the guest retry */
		return;
	}
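
	/*
	 * cr_ipsr.ri selects which of the bundle's three 41-bit slots
	 * faulted.  Slot 1 straddles the two 64-bit halves of the bundle,
	 * so it has to be reassembled from slot1a and slot1b.
	 */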
	slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
	if (!slot)
		inst.inst = bundle.slot0;
	else if (slot == 1) {
		slot1a = bundle.slot1a;
		slot1b = bundle.slot1b;
		inst.inst = slot1a + (slot1b << 18);
	} else if (slot == 2)
		inst.inst = bundle.slot2;

	/* Integer Load/Store */
	if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
		inst_type = SL_INTEGER;
		size = (inst.M1.x6 & 0x3);
		if ((inst.M1.x6 >> 2) > 0xb) {
			/* store: the data to write comes from r2 */
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M4.r2);
		} else if ((inst.M1.x6 >> 2) < 0xb) {
			/* load */
			dir = IOREQ_READ;
		}
	} else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
		/* Integer Load + Reg update */
		inst_type = SL_INTEGER;
		dir = IOREQ_READ;
		size = (inst.M2.x6 & 0x3);
		temp = vcpu_get_gr(vcpu, inst.M2.r3);
		update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
		temp += update_reg;
		vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
	} else if (inst.M3.major == 5) {
		/* Integer Load/Store + Imm update */
		inst_type = SL_INTEGER;
		size = (inst.M3.x6 & 0x3);
		if ((inst.M5.x6 >> 2) > 0xb) {
			/* store */
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M5.r2);
			temp = vcpu_get_gr(vcpu, inst.M5.r3);
			/*
			 * Assemble the 9-bit signed immediate at bit 23 so
			 * that its sign bit lands in bit 31, then shift it
			 * back down to sign-extend it before updating r3.
			 */
			imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
				(inst.M5.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
		} else if ((inst.M3.x6 >> 2) < 0xb) {
			/* load */
			dir = IOREQ_READ;
			temp = vcpu_get_gr(vcpu, inst.M3.r3);
			imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
				(inst.M3.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
		}
	} else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
				&& inst.M9.m == 0 && inst.M9.x == 0) {
		/* Floating-point spill */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
		/* Write the high word. FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
				ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
		/* Floating-point spill + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);

		/* Write the high word. FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
				8, ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
		/* Floating-point stf8 + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		size = 3;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		data = v.u.bits[0];	/* Significand. */
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
	} else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
			&& inst.M15.x6 <= 0x2f) {
		/* lfetch + Imm update: no data transfer, only r3 is updated */
		temp = vcpu_get_gr(vcpu, inst.M15.r3);
		imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
			(inst.M15.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

		vcpu_increment_iip(vcpu);
		return;
	} else if (inst.M12.major == 6 && inst.M12.m == 1
			&& inst.M12.x == 1 && inst.M12.x6 == 1) {
		/* Floating-point Load Pair + Imm update, ldfp8 M12 */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_READ;
		size = 8;
		mmio_access(vcpu, padr, &data, size, ma, dir);
		/* 0x1003E is the exponent used to hold a 64-bit integer */
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;
		vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
		padr += 8;
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;
		vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
		padr += 8;
		vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
		vcpu_increment_iip(vcpu);
		return;
	} else {
		inst_type = -1;
		panic_vm(vcpu, "Unsupported MMIO access instruction! "
				"Bundle[0]=0x%lx, Bundle[1]=0x%lx\n",
				bundle.i64[0], bundle.i64[1]);
	}
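
	/*
	 * The decode above has set dir, inst_type, the encoded access size
	 * and, for stores, the data to write.  Convert the size to bytes,
	 * perform the access, and for integer loads copy the zero-extended
	 * result back into the destination register.
	 */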
	size = 1 << size;
	if (dir == IOREQ_WRITE) {
		mmio_access(vcpu, padr, &data, size, ma, dir);
	} else {
		mmio_access(vcpu, padr, &data, size, ma, dir);
		if (inst_type == SL_INTEGER)
			vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
		else
			panic_vm(vcpu, "Unsupported instruction type!\n");
	}
	vcpu_increment_iip(vcpu);
}