/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>
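
/*
 * The host maps a "magic page" holding struct kvm_vcpu_arch_shared at the
 * top of the guest's effective address space (-4096), so that patched
 * guest code can read and write shadowed SPR values there without
 * trapping into the hypervisor.
 */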
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) (KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x))

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4
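
/*
 * Scratch space the patcher hands out for emulation trampolines; whatever
 * is left over is given back to the page allocator at boot (see
 * kvm_free_tmp() below).
 */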
static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;
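
/* Replace a single instruction in place and keep the icache coherent. */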
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}
static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}
static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}
static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}
static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}
static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels the interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
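
/*
 * Simple bump allocator that hands out chunks of kvm_tmp for trampolines.
 * Running out of space is not fatal; we just record that patching did not
 * fully work.
 */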
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
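
/*
 * The kvm_emulate_* templates are assembled in kvm_emul.S; the *_offs
 * symbols give the word offsets of the slots that must be fixed up per
 * invocation: the branch back to the patch site, the register slot(s),
 * and a copy of the original instruction.
 */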
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];
static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];
static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];
static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];
static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif
#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];
static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif
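
/*
 * Ask the host to map the magic page at effective (and real) address
 * -4096 on this CPU; the hypercall returns the feature bits the host
 * supports for it.
 */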
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8];
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE;

	kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
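
/*
 * Look at one kernel instruction and, if it is one of the privileged
 * operations we know how to paravirtualize, rewrite it in place: SPR
 * accesses become loads/stores into the magic page, tlbsync becomes a
 * nop, and MSR manipulation branches out to an emulation trampoline.
 */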
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}
	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}
	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];
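
/*
 * Map the magic page and walk the whole kernel text, patching every
 * instruction that kvm_check_ins() knows how to handle.
 */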
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}
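
/*
 * ePAPR-style hypercall: the call number goes in r11 and arguments in
 * r3-r10; outputs come back in r4-r11 with the return code in r3.
 */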
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl	epapr_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
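
/* Give the unused tail of kvm_tmp back to the page allocator. */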
static __init void kvm_free_tmp(void)
{
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}
postcore_initcall(kvm_guest_init);