// SPDX-License-Identifier: GPL-2.0-only
/*
 * amx tests
 *
 * Copyright (C) 2021, Intel, Inc.
 *
 * Tests for amx #NM exception and save/restore.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#ifndef __x86_64__
# error This test is 64-bit only
#endif

/* AMX exposes 8 tile registers of 1 KiB each (TMM0..TMM7). */
#define NUM_TILES			8
#define TILE_SIZE			1024
/* Upper bound on the XSAVE area: all tiles plus legacy/header region. */
#define XSAVE_SIZE			((NUM_TILES * TILE_SIZE) + PAGE_SIZE)

/* Tile configuration associated: */
#define PALETTE_TABLE_INDEX		1
#define MAX_TILES			16
#define RESERVED_BYTES			14

/* Offset of the XSAVE header within the XSAVE area. */
#define XSAVE_HDR_OFFSET		512
40 u8 reserved
[RESERVED_BYTES
];
46 u8 data
[NUM_TILES
* TILE_SIZE
];
58 static struct xtile_info xtile
;
60 static inline void __ldtilecfg(void *cfg
)
62 asm volatile(".byte 0xc4,0xe2,0x78,0x49,0x00"
66 static inline void __tileloadd(void *tile
)
68 asm volatile(".byte 0xc4,0xe2,0x7b,0x4b,0x04,0x10"
69 : : "a"(tile
), "d"(0));
72 static inline void __tilerelease(void)
74 asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::);
77 static inline void __xsavec(struct xstate
*xstate
, uint64_t rfbm
)
79 uint32_t rfbm_lo
= rfbm
;
80 uint32_t rfbm_hi
= rfbm
>> 32;
82 asm volatile("xsavec (%%rdi)"
83 : : "D" (xstate
), "a" (rfbm_lo
), "d" (rfbm_hi
)
87 static void check_xtile_info(void)
89 GUEST_ASSERT((xgetbv(0) & XFEATURE_MASK_XTILE
) == XFEATURE_MASK_XTILE
);
91 GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0
));
92 GUEST_ASSERT(this_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0
) <= XSAVE_SIZE
);
94 xtile
.xsave_offset
= this_cpu_property(X86_PROPERTY_XSTATE_TILE_OFFSET
);
95 GUEST_ASSERT(xtile
.xsave_offset
== 2816);
96 xtile
.xsave_size
= this_cpu_property(X86_PROPERTY_XSTATE_TILE_SIZE
);
97 GUEST_ASSERT(xtile
.xsave_size
== 8192);
98 GUEST_ASSERT(sizeof(struct tile_data
) >= xtile
.xsave_size
);
100 GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_MAX_PALETTE_TABLES
));
101 GUEST_ASSERT(this_cpu_property(X86_PROPERTY_AMX_MAX_PALETTE_TABLES
) >=
102 PALETTE_TABLE_INDEX
);
104 GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_NR_TILE_REGS
));
105 xtile
.max_names
= this_cpu_property(X86_PROPERTY_AMX_NR_TILE_REGS
);
106 GUEST_ASSERT(xtile
.max_names
== 8);
107 xtile
.bytes_per_tile
= this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_TILE
);
108 GUEST_ASSERT(xtile
.bytes_per_tile
== 1024);
109 xtile
.bytes_per_row
= this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_ROW
);
110 GUEST_ASSERT(xtile
.bytes_per_row
== 64);
111 xtile
.max_rows
= this_cpu_property(X86_PROPERTY_AMX_MAX_ROWS
);
112 GUEST_ASSERT(xtile
.max_rows
== 16);
115 static void set_tilecfg(struct tile_config
*cfg
)
119 /* Only palette id 1 */
121 for (i
= 0; i
< xtile
.max_names
; i
++) {
122 cfg
->colsb
[i
] = xtile
.bytes_per_row
;
123 cfg
->rows
[i
] = xtile
.max_rows
;
127 static void __attribute__((__flatten__
)) guest_code(struct tile_config
*amx_cfg
,
128 struct tile_data
*tiledata
,
129 struct xstate
*xstate
)
131 GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE
) &&
132 this_cpu_has(X86_FEATURE_OSXSAVE
));
136 /* xfd=0, enable amx */
137 wrmsr(MSR_IA32_XFD
, 0);
139 GUEST_ASSERT(rdmsr(MSR_IA32_XFD
) == 0);
140 set_tilecfg(amx_cfg
);
141 __ldtilecfg(amx_cfg
);
143 /* Check save/restore when trap to userspace */
144 __tileloadd(tiledata
);
149 * After XSAVEC, XTILEDATA is cleared in the xstate_bv but is set in
152 xstate
->header
.xstate_bv
= XFEATURE_MASK_XTILE_DATA
;
153 __xsavec(xstate
, XFEATURE_MASK_XTILE_DATA
);
154 GUEST_ASSERT(!(xstate
->header
.xstate_bv
& XFEATURE_MASK_XTILE_DATA
));
155 GUEST_ASSERT(xstate
->header
.xcomp_bv
& XFEATURE_MASK_XTILE_DATA
);
157 /* xfd=0x40000, disable amx tiledata */
158 wrmsr(MSR_IA32_XFD
, XFEATURE_MASK_XTILE_DATA
);
161 * XTILEDATA is cleared in xstate_bv but set in xcomp_bv, this property
162 * remains the same even when amx tiledata is disabled by IA32_XFD.
164 xstate
->header
.xstate_bv
= XFEATURE_MASK_XTILE_DATA
;
165 __xsavec(xstate
, XFEATURE_MASK_XTILE_DATA
);
166 GUEST_ASSERT(!(xstate
->header
.xstate_bv
& XFEATURE_MASK_XTILE_DATA
));
167 GUEST_ASSERT((xstate
->header
.xcomp_bv
& XFEATURE_MASK_XTILE_DATA
));
170 GUEST_ASSERT(rdmsr(MSR_IA32_XFD
) == XFEATURE_MASK_XTILE_DATA
);
171 set_tilecfg(amx_cfg
);
172 __ldtilecfg(amx_cfg
);
173 /* Trigger #NM exception */
174 __tileloadd(tiledata
);
180 void guest_nm_handler(struct ex_regs
*regs
)
182 /* Check if #NM is triggered by XFEATURE_MASK_XTILE_DATA */
184 GUEST_ASSERT(!(get_cr0() & X86_CR0_TS
));
185 GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR
) == XFEATURE_MASK_XTILE_DATA
);
186 GUEST_ASSERT(rdmsr(MSR_IA32_XFD
) == XFEATURE_MASK_XTILE_DATA
);
188 GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR
) == XFEATURE_MASK_XTILE_DATA
);
189 GUEST_ASSERT(rdmsr(MSR_IA32_XFD
) == XFEATURE_MASK_XTILE_DATA
);
191 wrmsr(MSR_IA32_XFD_ERR
, 0);
192 /* xfd=0, enable amx */
193 wrmsr(MSR_IA32_XFD
, 0);
197 int main(int argc
, char *argv
[])
199 struct kvm_regs regs1
, regs2
;
200 struct kvm_vcpu
*vcpu
;
202 struct kvm_x86_state
*state
;
203 int xsave_restore_size
;
204 vm_vaddr_t amx_cfg
, tiledata
, xstate
;
210 * Note, all off-by-default features must be enabled before anything
211 * caches KVM_GET_SUPPORTED_CPUID, e.g. before using kvm_cpu_has().
213 vm_xsave_require_permission(XFEATURE_MASK_XTILE_DATA
);
215 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD
));
216 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE
));
217 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_AMX_TILE
));
218 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILECFG
));
219 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA
));
220 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA_XFD
));
223 vm
= vm_create_with_one_vcpu(&vcpu
, guest_code
);
225 TEST_ASSERT(kvm_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE
),
226 "KVM should enumerate max XSAVE size when XSAVE is supported");
227 xsave_restore_size
= kvm_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE
);
229 vcpu_regs_get(vcpu
, ®s1
);
231 /* Register #NM handler */
232 vm_install_exception_handler(vm
, NM_VECTOR
, guest_nm_handler
);
234 /* amx cfg for guest_code */
235 amx_cfg
= vm_vaddr_alloc_page(vm
);
236 memset(addr_gva2hva(vm
, amx_cfg
), 0x0, getpagesize());
238 /* amx tiledata for guest_code */
239 tiledata
= vm_vaddr_alloc_pages(vm
, 2);
240 memset(addr_gva2hva(vm
, tiledata
), rand() | 1, 2 * getpagesize());
242 /* XSAVE state for guest_code */
243 xstate
= vm_vaddr_alloc_pages(vm
, DIV_ROUND_UP(XSAVE_SIZE
, PAGE_SIZE
));
244 memset(addr_gva2hva(vm
, xstate
), 0, PAGE_SIZE
* DIV_ROUND_UP(XSAVE_SIZE
, PAGE_SIZE
));
245 vcpu_args_set(vcpu
, 3, amx_cfg
, tiledata
, xstate
);
249 TEST_ASSERT_KVM_EXIT_REASON(vcpu
, KVM_EXIT_IO
);
251 switch (get_ucall(vcpu
, &uc
)) {
253 REPORT_GUEST_ASSERT(uc
);
256 switch (uc
.args
[1]) {
264 fprintf(stderr
, "GUEST_SYNC(%ld)\n", uc
.args
[1]);
269 "GUEST_SYNC(%ld), check save/restore status\n", uc
.args
[1]);
271 /* Compacted mode, get amx offset by xsave area
272 * size subtract 8K amx size.
274 amx_offset
= xsave_restore_size
- NUM_TILES
*TILE_SIZE
;
275 state
= vcpu_save_state(vcpu
);
276 void *amx_start
= (void *)state
->xsave
+ amx_offset
;
277 void *tiles_data
= (void *)addr_gva2hva(vm
, tiledata
);
278 /* Only check TMM0 register, 1 tile */
279 ret
= memcmp(amx_start
, tiles_data
, TILE_SIZE
);
280 TEST_ASSERT(ret
== 0, "memcmp failed, ret=%d", ret
);
281 kvm_x86_state_cleanup(state
);
285 "GUEST_SYNC(%ld), #NM exception and enable amx\n", uc
.args
[1]);
290 fprintf(stderr
, "UCALL_DONE\n");
293 TEST_FAIL("Unknown ucall %lu", uc
.cmd
);
296 state
= vcpu_save_state(vcpu
);
297 memset(®s1
, 0, sizeof(regs1
));
298 vcpu_regs_get(vcpu
, ®s1
);
302 /* Restore state in a new VM. */
303 vcpu
= vm_recreate_with_one_vcpu(vm
);
304 vcpu_load_state(vcpu
, state
);
305 kvm_x86_state_cleanup(state
);
307 memset(®s2
, 0, sizeof(regs2
));
308 vcpu_regs_get(vcpu
, ®s2
);
309 TEST_ASSERT(!memcmp(®s1
, ®s2
, sizeof(regs2
)),
310 "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
311 (ulong
) regs2
.rdi
, (ulong
) regs2
.rsi
);