arch/ia64/kvm/kvm_fw.c

/*
 * PAL/SAL call delegation
 *
 * Copyright (c) 2004 Li Susie <susie.li@intel.com>
 * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
 * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <linux/kvm_host.h>
#include <linux/smp.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>

#include "vti.h"
#include "misc.h"

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/tlb.h>

/*
 * Handy macros to make sure that the PAL return values start out
 * as something meaningful.
 */
#define INIT_PAL_STATUS_UNIMPLEMENTED(x)		\
	{						\
		x.status = PAL_STATUS_UNIMPLEMENTED;	\
		x.v0 = 0;				\
		x.v1 = 0;				\
		x.v2 = 0;				\
	}

#define INIT_PAL_STATUS_SUCCESS(x)			\
	{						\
		x.status = PAL_STATUS_SUCCESS;		\
		x.v0 = 0;				\
		x.v1 = 0;				\
		x.v2 = 0;				\
	}
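
/*
 * The VMM records the guest's PAL/SAL call arguments in
 * vcpu->arch.exit_data before exiting to this module; the helpers below
 * read those arguments back and store the emulated return values.
 */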
static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
		u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) {
	struct exit_ctl_data *p;

	if (vcpu) {
		p = &vcpu->arch.exit_data;
		if (p->exit_reason == EXIT_REASON_PAL_CALL) {
			*gr28 = p->u.pal_data.gr28;
			*gr29 = p->u.pal_data.gr29;
			*gr30 = p->u.pal_data.gr30;
			*gr31 = p->u.pal_data.gr31;
			return;
		}
	}
	printk(KERN_DEBUG "Failed to get vcpu pal data!!!\n");
}

static void set_pal_result(struct kvm_vcpu *vcpu,
		struct ia64_pal_retval result) {
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);
	if (p->exit_reason == EXIT_REASON_PAL_CALL) {
		p->u.pal_data.ret = result;
		return;
	}
	INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
}

static void set_sal_result(struct kvm_vcpu *vcpu,
		struct sal_ret_values result) {
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);
	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		p->u.sal_data.ret = result;
		return;
	}
	printk(KERN_WARNING "Failed to set sal result!!\n");
}

struct cache_flush_args {
	u64 cache_type;
	u64 operation;
	u64 progress;
	long status;
};

cpumask_t cpu_cache_coherent_map;
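
/*
 * Runs on each remote CPU via smp_call_function(): performs the PAL
 * cache flush locally and records any failure status for the caller.
 */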
static void remote_pal_cache_flush(void *data)
{
	struct cache_flush_args *args = data;
	long status;
	u64 progress = args->progress;

	status = ia64_pal_cache_flush(args->cache_type, args->operation,
					&progress, NULL);
	if (status != 0)
		args->status = status;
}
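
/*
 * Emulate PAL_CACHE_FLUSH for the guest by flushing on every physical
 * CPU: an IPI broadcast handles the remote processors, then the flush
 * is repeated locally with interrupts disabled.
 */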
static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
{
	u64 gr28, gr29, gr30, gr31;
	struct ia64_pal_retval result = {0, 0, 0, 0};
	struct cache_flush_args args = {0, 0, 0, 0};
	long psr;

	gr28 = gr29 = gr30 = gr31 = 0;
	kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);

	if (gr31 != 0)
		printk(KERN_ERR "vcpu:%p called cache_flush error!\n", vcpu);

	/* Always call Host Pal in int=1 */
	gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
	args.cache_type = gr29;
	args.operation = gr30;
	smp_call_function(remote_pal_cache_flush,
				(void *)&args, 1);
	if (args.status != 0)
		printk(KERN_ERR "pal_cache_flush error!,"
				" status:0x%lx\n", args.status);
	/*
	 * Call Host PAL cache flush
	 * Clear psr.ic when call PAL_CACHE_FLUSH
	 */
	local_irq_save(psr);
	result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
						&result.v0);
	local_irq_restore(psr);
	if (result.status != 0)
		printk(KERN_ERR "vcpu:%p crashed due to cache_flush err:%ld"
				" in1:%lx,in2:%lx\n",
				vcpu, result.status, gr29, gr30);

#if 0
	if (gr29 == PAL_CACHE_TYPE_COHERENT) {
		cpus_setall(vcpu->arch.cache_coherent_map);
		cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
		cpus_setall(cpu_cache_coherent_map);
		cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
	}
#endif
	return result;
}

struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result;

	PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
	return result;
}

static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result;

	PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);

	/*
	 * PAL_FREQ_BASE may not be implemented on some platforms,
	 * call SAL instead.
	 */
	if (result.v0 == 0) {
		result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
							&result.v0,
							&result.v1);
		result.v2 = 0;
	}

	return result;
}

/*
 * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
 * RTC is used instead. This function patches the ratios from SAL
 * to match the RTC before providing them to the guest.
 */
static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
{
	struct pal_freq_ratio *ratio;
	unsigned long sal_freq, sal_drift, factor;

	result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
					    &sal_freq, &sal_drift);
	ratio = (struct pal_freq_ratio *)&result->v2;
	/*
	 * factor ~= 3 * sal_freq / RTC rate, so the reported ratio
	 * (3 / factor) maps the platform base frequency onto the SN2
	 * RTC rate.
	 */
	factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
		sn_rtc_cycles_per_second;

	ratio->num = 3;
	ratio->den = factor;
}

static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result;

	PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);

	if (vcpu->kvm->arch.is_sn2)
		sn2_patch_itc_freq_ratios(&result);

	return result;
}

static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result;

	INIT_PAL_STATUS_UNIMPLEMENTED(result);
	return result;
}

static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result;

	INIT_PAL_STATUS_SUCCESS(result);
	return result;
}

static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result = {0, 0, 0, 0};
	long in0, in1, in2, in3;

	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
	result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
			&result.v2, in2);

	return result;
}

static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result = {0, 0, 0, 0};
	long in0, in1, in2, in3;

	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
	result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);

	return result;
}

static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
{
	pal_cache_config_info_t ci;
	long status;
	unsigned long in0, in1, in2, in3, r9, r10;

	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
	status = ia64_pal_cache_config_info(in1, in2, &ci);
	r9 = ci.pcci_info_1.pcci1_data;
	r10 = ci.pcci_info_2.pcci2_data;
	return ((struct ia64_pal_retval){status, r9, r10, 0});
}

#define GUEST_IMPL_VA_MSB	59
#define GUEST_RID_BITS		18
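
/*
 * PAL_VM_SUMMARY comes from the host but is edited before it reaches
 * the guest: only 8 ITR/DTR pairs are advertised, and the implemented
 * virtual-address MSB and region-ID width are clamped to the guest
 * limits defined above.
 */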
static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
{
	pal_vm_info_1_u_t vminfo1;
	pal_vm_info_2_u_t vminfo2;
	struct ia64_pal_retval result;

	PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
	if (!result.status) {
		vminfo1.pvi1_val = result.v0;
		vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
		vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
		result.v0 = vminfo1.pvi1_val;
		vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
		vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
		result.v1 = vminfo2.pvi2_val;
	}

	return result;
}

static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result;
	unsigned long in0, in1, in2, in3;

	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);

	result.status = ia64_pal_vm_info(in1, in2,
			(pal_tc_info_u_t *)&result.v1, &result.v2);

	return result;
}

static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
{
	u64 index = 0;
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);
	if (p->exit_reason == EXIT_REASON_PAL_CALL)
		index = p->u.pal_data.gr28;

	return index;
}
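
/*
 * Arm the vcpu's timer bookkeeping before a PAL_HALT_LIGHT (see
 * kvm_pal_emul() below): the vcpu then blocks in kvm_emulate_halt()
 * until the timer fires or an interrupt becomes pending.
 */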
static void prepare_for_halt(struct kvm_vcpu *vcpu)
{
	vcpu->arch.timer_pending = 1;
	vcpu->arch.timer_fired = 0;
}

static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
{
	long status;
	unsigned long in0, in1, in2, in3, r9;
	unsigned long pm_buffer[16];

	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
	status = ia64_pal_perf_mon_info(pm_buffer,
				(pal_perf_mon_info_u_t *) &r9);
	if (status != 0) {
		printk(KERN_DEBUG "PAL_PERF_MON_INFO fails ret=%ld\n", status);
	} else {
		if (in1)
			memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
		else {
			status = PAL_STATUS_EINVAL;
			printk(KERN_WARNING "Invalid parameters "
					"for PAL call:0x%lx!\n", in0);
		}
	}
	return (struct ia64_pal_retval){status, r9, 0, 0};
}
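
/*
 * PAL_HALT_INFO: hand the guest a single canned power-management
 * entry: entry/exit latencies of 1000, a power-consumption figure of
 * 10, and bits 60 and 61 (the implemented/coherent flags in the
 * pal_power_mgmt_info layout) set.
 */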
static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
{
	unsigned long in0, in1, in2, in3;
	long status;
	unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
					| (1UL << 61) | (1UL << 60);

	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
	if (in1) {
		memcpy((void *)in1, &res, sizeof(res));
		status = 0;
	} else {
		status = PAL_STATUS_EINVAL;
		printk(KERN_WARNING "Invalid parameters "
					"for PAL call:0x%lx!\n", in0);
	}

	return (struct ia64_pal_retval){status, 0, 0, 0};
}

static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
{
	unsigned long r9;
	long status;

	status = ia64_pal_mem_attrib(&r9);

	return (struct ia64_pal_retval){status, r9, 0, 0};
}

static void remote_pal_prefetch_visibility(void *v)
{
	s64 trans_type = (s64)v;
	ia64_pal_prefetch_visibility(trans_type);
}

static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result = {0, 0, 0, 0};
	unsigned long in0, in1, in2, in3;

	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
	result.status = ia64_pal_prefetch_visibility(in1);
	if (result.status == 0) {
		/* Must be performed on all remote processors
		   in the coherence domain. */
		smp_call_function(remote_pal_prefetch_visibility,
					(void *)in1, 1);
		/* Unnecessary on remote processor for other vcpus! */
		result.status = 1;
	}
	return result;
}

static void remote_pal_mc_drain(void *v)
{
	ia64_pal_mc_drain();
}

static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
{
	struct ia64_pal_retval result = {0, 0, 0, 0};
	unsigned long in0, in1, in2, in3;

	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);

	if (in1 == 0 && in2) {
		char brand_info[128];
		result.status = ia64_pal_get_brand_info(brand_info);
		if (result.status == PAL_STATUS_SUCCESS)
			memcpy((void *)in2, brand_info, 128);
	} else {
		result.status = PAL_STATUS_REQUIRES_MEMORY;
		printk(KERN_WARNING "Invalid parameters for "
					"PAL call:0x%lx!\n", in0);
	}

	return result;
}
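
/*
 * Main PAL dispatcher: gr28 carries the PAL procedure index (per the
 * PAL static-register calling convention). Each supported index is
 * either emulated locally or forwarded to the host PAL/SAL, and the
 * result is written back for the guest via set_pal_result(). The
 * return value is 1 except for PAL_HALT_LIGHT, where it comes from
 * kvm_emulate_halt().
 */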
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u64 gr28;
	struct ia64_pal_retval result;
	int ret = 1;

	gr28 = kvm_get_pal_call_index(vcpu);
	switch (gr28) {
	case PAL_CACHE_FLUSH:
		result = pal_cache_flush(vcpu);
		break;
	case PAL_MEM_ATTRIB:
		result = pal_mem_attrib(vcpu);
		break;
	case PAL_CACHE_SUMMARY:
		result = pal_cache_summary(vcpu);
		break;
	case PAL_PERF_MON_INFO:
		result = pal_perf_mon_info(vcpu);
		break;
	case PAL_HALT_INFO:
		result = pal_halt_info(vcpu);
		break;
	case PAL_HALT_LIGHT:
		INIT_PAL_STATUS_SUCCESS(result);
		prepare_for_halt(vcpu);
		if (kvm_highest_pending_irq(vcpu) == -1)
			ret = kvm_emulate_halt(vcpu);
		break;

	case PAL_PREFETCH_VISIBILITY:
		result = pal_prefetch_visibility(vcpu);
		break;
	case PAL_MC_DRAIN:
		result.status = ia64_pal_mc_drain();
		/* FIXME: All vcpus likely call PAL_MC_DRAIN.
		   That causes the congestion. */
		smp_call_function(remote_pal_mc_drain, NULL, 1);
		break;

	case PAL_FREQ_RATIOS:
		result = pal_freq_ratios(vcpu);
		break;

	case PAL_FREQ_BASE:
		result = pal_freq_base(vcpu);
		break;

	case PAL_LOGICAL_TO_PHYSICAL:
		result = pal_logical_to_physica(vcpu);
		break;

	case PAL_VM_SUMMARY:
		result = pal_vm_summary(vcpu);
		break;

	case PAL_VM_INFO:
		result = pal_vm_info(vcpu);
		break;
	case PAL_PLATFORM_ADDR:
		result = pal_platform_addr(vcpu);
		break;
	case PAL_CACHE_INFO:
		result = pal_cache_info(vcpu);
		break;
	case PAL_PTCE_INFO:
		INIT_PAL_STATUS_SUCCESS(result);
		result.v1 = (1L << 32) | 1L;
		break;
	case PAL_REGISTER_INFO:
		result = pal_register_info(vcpu);
		break;
	case PAL_VM_PAGE_SIZE:
		result.status = ia64_pal_vm_page_size(&result.v0,
							&result.v1);
		break;
	case PAL_RSE_INFO:
		result.status = ia64_pal_rse_info(&result.v0,
					(pal_hints_u_t *)&result.v1);
		break;
	case PAL_PROC_GET_FEATURES:
		result = pal_proc_get_features(vcpu);
		break;
	case PAL_DEBUG_INFO:
		result.status = ia64_pal_debug_info(&result.v0,
							&result.v1);
		break;
	case PAL_VERSION:
		result.status = ia64_pal_version(
				(pal_version_u_t *)&result.v0,
				(pal_version_u_t *)&result.v1);
		break;
	case PAL_FIXED_ADDR:
		result.status = PAL_STATUS_SUCCESS;
		result.v0 = vcpu->vcpu_id;
		break;
	case PAL_BRAND_INFO:
		result = pal_get_brand_info(vcpu);
		break;
	case PAL_GET_PSTATE:
	case PAL_CACHE_SHARED_INFO:
		INIT_PAL_STATUS_UNIMPLEMENTED(result);
		break;
	default:
		INIT_PAL_STATUS_UNIMPLEMENTED(result);
		printk(KERN_WARNING "kvm: Unsupported pal call,"
				" index:0x%lx\n", gr28);
	}
	set_pal_result(vcpu, result);
	return ret;
}
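
/*
 * SAL call emulation. Only a handful of SAL procedures are meaningful
 * inside a guest (SAL_FREQ_BASE, SAL_SET_VECTORS for the boot
 * rendezvous, cache flush, and the state-info stubs); the rest are
 * logged and ignored.
 */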
static struct sal_ret_values sal_emulator(struct kvm *kvm,
	long index, unsigned long in1,
	unsigned long in2, unsigned long in3,
	unsigned long in4, unsigned long in5,
	unsigned long in6, unsigned long in7)
{
	unsigned long r9 = 0;
	unsigned long r10 = 0;
	long r11 = 0;
	long status;

	status = 0;
	switch (index) {
	case SAL_FREQ_BASE:
		status = ia64_sal_freq_base(in1, &r9, &r10);
		break;
	case SAL_PCI_CONFIG_READ:
		printk(KERN_WARNING "kvm: Not allowed to call here!"
			" SAL_PCI_CONFIG_READ\n");
		break;
	case SAL_PCI_CONFIG_WRITE:
		printk(KERN_WARNING "kvm: Not allowed to call here!"
			" SAL_PCI_CONFIG_WRITE\n");
		break;
	case SAL_SET_VECTORS:
		if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
			if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
				status = -2;
			} else {
				kvm->arch.rdv_sal_data.boot_ip = in2;
				kvm->arch.rdv_sal_data.boot_gp = in3;
			}
			printk("Rendezvous called! iip:%lx\n\n", in2);
		} else
			printk(KERN_WARNING "kvm: called SAL_SET_VECTORS %lu."
					" ignored...\n", in1);
		break;
	case SAL_GET_STATE_INFO:
		/* No more info. */
		status = -5;
		r9 = 0;
		break;
	case SAL_GET_STATE_INFO_SIZE:
		/* Return a dummy size. */
		status = 0;
		r9 = 128;
		break;
	case SAL_CLEAR_STATE_INFO:
		/* Noop. */
		break;
	case SAL_MC_RENDEZ:
		printk(KERN_WARNING
			"kvm: called SAL_MC_RENDEZ. ignored...\n");
		break;
	case SAL_MC_SET_PARAMS:
		printk(KERN_WARNING
			"kvm: called SAL_MC_SET_PARAMS. ignored!\n");
		break;
	case SAL_CACHE_FLUSH:
		if (1) {
			/* Flush using SAL.
			   This method is faster but has a side
			   effect on other vcpus running on
			   this cpu. */
			status = ia64_sal_cache_flush(in1);
		} else {
			/* Maybe need to implement the method
			   without the side effect! */
			status = 0;
		}
		break;
	case SAL_CACHE_INIT:
		printk(KERN_WARNING
			"kvm: called SAL_CACHE_INIT. ignored...\n");
		break;
	case SAL_UPDATE_PAL:
		printk(KERN_WARNING
			"kvm: called SAL_UPDATE_PAL. ignored...\n");
		break;
	default:
		printk(KERN_WARNING "kvm: called SAL_CALL with unknown index."
				" index:%ld\n", index);
		status = -1;
		break;
	}
	return ((struct sal_ret_values) {status, r9, r10, r11});
}
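
/*
 * SAL call arguments (index plus up to seven inputs) are read back
 * from the exit data recorded by the VMM, mirroring the PAL path
 * above.
 */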
static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
		u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		*in0 = p->u.sal_data.in0;
		*in1 = p->u.sal_data.in1;
		*in2 = p->u.sal_data.in2;
		*in3 = p->u.sal_data.in3;
		*in4 = p->u.sal_data.in4;
		*in5 = p->u.sal_data.in5;
		*in6 = p->u.sal_data.in6;
		*in7 = p->u.sal_data.in7;
		return;
	}
	*in0 = 0;
}
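
/*
 * Entry point for guest SAL calls: fetch the arguments, run the
 * emulator on the host, and hand the four return values back to the
 * guest.
 */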
void kvm_sal_emul(struct kvm_vcpu *vcpu)
{
	struct sal_ret_values result;
	u64 index, in1, in2, in3, in4, in5, in6, in7;

	kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
			&in3, &in4, &in5, &in6, &in7);
	result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
					in4, in5, in6, in7);
	set_sal_result(vcpu, result);
}