// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include "processor.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"

#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define PAGES_PER_REGION 4

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
{
	vm_paddr_t paddr;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	if (vm->pgd_created)
		return;

	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

	vm->pgd = paddr;
	vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
{
	uint64_t taddr;

	taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
	/* Invalidate all entries, but only in the pages actually allocated */
	memset(addr_gpa2hva(vm, taddr), 0xff,
	       (ri < 4 ? PAGES_PER_REGION : 1) * vm->page_size);

	return (taddr & REGION_ENTRY_ORIGIN)
		| (((4 - ri) << 2) & REGION_ENTRY_TYPE)
		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

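/*
 * A worked example of the entry encoding above (editor's sketch, not part
 * of the original code): for the top-level step of the walk (ri == 1),
 * an allocated table at 0x181000 yields
 *
 *	  0x181000		table origin (REGION_ENTRY_ORIGIN)
 *	| ((4 - 1) << 2) = 0xc	region-first table type (REGION_ENTRY_TYPE)
 *	| (4 - 1)	 = 0x3	table length, 4 pages present (REGION_ENTRY_LENGTH)
 *	= 0x18100f
 *
 * A page table (ri == 4) gets type 0 and length 0 instead, since it
 * occupies a single page. Note how vm_vcpu_add_default() below builds the
 * same "origin | 0xf" shape when loading vm->pgd into control register 1.
 */
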
void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
		 uint32_t memslot)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(gva >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gpa, vm->max_gfn, vm->page_size);
	/* Walk through region and segment tables */
	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		if (entry[idx] & REGION_ENTRY_INVALID)
			entry[idx] = virt_alloc_region(vm, ri, memslot);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	/* Fill in page table entry */
	idx = (gva >> 12) & 0x0ffu;		/* page index */
	if (!(entry[idx] & PAGE_INVALID))
		fprintf(stderr,
			"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
	entry[idx] = gpa;
}

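/*
 * For reference, the address split implemented by the walk above (a
 * reading aid, assuming the 4k-page, 5-level layout this file uses):
 *
 *	bits 63..53  region-first index	  (gva >> 53) & 0x7ff	(ri == 1)
 *	bits 52..42  region-second index  (gva >> 42) & 0x7ff	(ri == 2)
 *	bits 41..31  region-third index	  (gva >> 31) & 0x7ff	(ri == 3)
 *	bits 30..20  segment index	  (gva >> 20) & 0x7ff	(ri == 4)
 *	bits 19..12  page index		  (gva >> 12) & 0xff
 *	bits 11..0   byte offset into the page
 */
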
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
			    "No region mapping for vm virtual address 0x%lx",
			    gva);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	idx = (gva >> 12) & 0x0ffu;		/* page index */

	TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
		    "No page mapping for vm virtual address 0x%lx", gva);

	return (entry[idx] & ~0xffful) + (gva & 0xffful);
}

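/*
 * Example usage (editor's sketch, memslot 0 assumed): after a page has
 * been mapped, any address inside it translates to the mapped physical
 * page plus the byte offset:
 *
 *	virt_pg_map(vm, 0x200000, 0x300000, 0);
 *	addr_gva2gpa(vm, 0x200abc);	// returns 0x300abc
 */
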
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			   uint64_t ptea_start)
{
	uint64_t *pte, ptea;

	for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
		pte = addr_gpa2hva(vm, ptea);
		if (*pte & PAGE_INVALID)
			continue;
		fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
			indent, "", ptea, *pte);
	}
}

static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			     uint64_t reg_tab_addr)
{
	uint64_t addr, *entry;

	for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
		entry = addr_gpa2hva(vm, addr);
		if (*entry & REGION_ENTRY_INVALID)
			continue;
		fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
			indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
			addr, *entry);
		if (*entry & REGION_ENTRY_TYPE) {
			virt_dump_region(stream, vm, indent + 2,
					 *entry & REGION_ENTRY_ORIGIN);
		} else {
			virt_dump_ptes(stream, vm, indent + 2,
				       *entry & REGION_ENTRY_ORIGIN);
		}
	}
}

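/*
 * Illustrative dump shape (made-up addresses, editor's sketch): each
 * recursion level indents two more columns, with rt1e..rt4e entries
 * above the ptes:
 *
 *	rt1e @ 0x180000: 0x000000000018100f
 *	  rt2e @ 0x181000: 0x000000000018200b
 *	    ...
 *	      pte @ 0x185000: 0x0000000000400000
 */
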
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	if (!vm->pgd_created)
		return;

	virt_dump_region(stream, vm, indent, vm->pgd);
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_run *run;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	stack_vaddr = vm_vaddr_alloc(vm, stack_size,
				     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);

	vm_vcpu_add(vm, vcpuid);

	/* Setup guest registers */
	vcpu_regs_get(vm, vcpuid, &regs);
	/* Stack grows down; leave a 160-byte register save area at the top */
	regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
	vcpu_regs_set(vm, vcpuid, &regs);

	vcpu_sregs_get(vm, vcpuid, &sregs);
	sregs.crs[0] |= 0x00040000;		/* Enable floating point regs */
	sregs.crs[1] = vm->pgd | 0xf;		/* Primary region table */
	vcpu_sregs_set(vm, vcpuid, &sregs);

	run = vcpu_state(vm, vcpuid);
	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
	run->psw_addr = (uintptr_t)guest_code;
}

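/*
 * Typical use from a test (a sketch; vm_create_default() and vcpu_run()
 * are the usual selftest helpers, guest_main is the test's guest entry
 * point):
 *
 *	struct kvm_vm *vm = vm_create_default(0, 0, guest_main);
 *	vcpu_run(vm, 0);	// enters guest_main with DAT enabled
 */
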
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;
	int i;

	TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);
	vcpu_regs_get(vm, vcpuid, &regs);

	for (i = 0; i < num; i++)
		regs.gprs[i + 2] = va_arg(ap, uint64_t);

	vcpu_regs_set(vm, vcpuid, &regs);

	va_end(ap);
}

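/*
 * Example (editor's sketch): arguments are placed in gprs 2-6, matching
 * the s390 ELF ABI, so the guest entry point receives them as ordinary C
 * parameters:
 *
 *	vcpu_args_set(vm, 0, 2, 0x1234, 0x5678);
 *	// guest: guest_main(uint64_t a, uint64_t b) sees a == 0x1234
 */
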
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	if (!vcpu)
		return;

	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
		indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
}

void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
}