/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4
static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;
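/*
 * Patch a single instruction in place and flush the icache so the
 * rewritten instruction is visible to subsequent fetches.
 */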
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}
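/*
 * Helpers that patch in loads/stores of magic page fields. The shared
 * page fields are 64 bits wide: 64-bit kernels use ld/std, while
 * 32-bit kernels access only the 32 bits of interest (for _ld/_std
 * that is the word at addr + 4, i.e. the low half of the field).
 */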
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}
static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}
static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}
static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}
static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}
static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}
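/*
 * Patch in an unconditional branch ("b") to the emulation code; addr
 * is the relative displacement and must fit in the instruction's LI
 * field.
 */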
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupts handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
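/*
 * Hand out len bytes from the kvm_tmp scratch buffer. On exhaustion
 * we return NULL and give up on patching altogether.
 */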
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];
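/*
 * Divert an mtmsrd, which would otherwise trap, into a branch to a
 * private copy of the kvm_emulate_mtmsrd template: the template is
 * copied into kvm_tmp, its return branch and register slot are fixed
 * up for this call site, the original instruction is saved into the
 * template, and the call site itself is rewritten to branch there.
 */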
static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];
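/*
 * Same game as for mtmsrd, but the mtmsr template reads the source
 * register in two places, so both register slots get fixed up.
 */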
static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];
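/*
 * BookE wrtee/wrteei emulation: only MSR_EE changes, so the template
 * just needs to know where the EE value comes from: a register, or an
 * immediate 1 when imm_one is set.
 */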
static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];
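/*
 * wrteei 0 clears MSR_EE unconditionally, so its template needs no
 * register or immediate fixup, only the return branch.
 */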
static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];
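/*
 * 32-bit Book3S only: divert mtsrin (segment register update) into its
 * emulation template; both register slots of the template are fixed up
 * for this call site.
 */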
static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif
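/*
 * Runs on every CPU: asks the host via hypercall to map the shared
 * (magic) page at address -4096 and reports back the feature bits the
 * host returned.
 */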
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8];
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE;

	kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
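/*
 * Look at one instruction word and decide whether it is something we
 * can rewrite: privileged SPR/MSR reads and writes become magic page
 * loads and stores, tlbsync becomes a nop, and MSR writes get diverted
 * into the emulation templates above.
 */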
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;
	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */
	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */
	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];
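/*
 * Walk the kernel text and patch every instruction we know how to
 * handle. Runs once at boot, after the host has mapped the magic page.
 */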
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}
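/*
 * ePAPR-style hypercall: eight argument words in r3-r10, the hypercall
 * token in r11, eight return words in r4-r11, status back in r3.
 */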
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl epapr_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
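/*
 * Return the unused tail of the kvm_tmp buffer to the page allocator
 * once patching is done.
 */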
static __init void kvm_free_tmp(void)
{
	unsigned long start, end;

	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

	/* Free the tmp space we don't need */
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
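/*
 * Boot-time entry point: if we are running as a KVM guest and the host
 * offers the magic page, map it and patch the kernel text.
 */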
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);