// clang/test/CodeGen/X86/x86_64-xsave.c
// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XSAVE
// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -fno-signed-char -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XSAVE

// RUN: %clang_cc1 %s -DTEST_XGETBV -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XGETBV
// RUN: %clang_cc1 %s -DTEST_XSETBV -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -fno-signed-char -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XSETBV

// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -target-feature +xsaveopt -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XSAVEOPT
// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -target-feature +xsaveopt -fno-signed-char -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XSAVEOPT

// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -target-feature +xsavec -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XSAVEC
// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -target-feature +xsavec -fno-signed-char -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XSAVEC

// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -target-feature +xsaves -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XSAVES
// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -target-feature +xsaves -fno-signed-char -emit-llvm -o - -Wall -Wno-unused-but-set-variable -Werror | FileCheck %s --check-prefix=XSAVES

// Don't include mm_malloc.h; it's system-specific.
#define __MM_MALLOC_H
#include <x86intrin.h>
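
// Each xsave-family builtin takes a 64-bit feature mask. CodeGen splits that
// mask into its high and low 32-bit halves (the EDX:EAX operands of the
// underlying instruction) before calling the llvm.x86.* intrinsic; the CHECK
// lines below verify that split.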

void test(void) {
  unsigned long long tmp_ULLi;
  unsigned int tmp_Ui;
  void* tmp_vp;
  tmp_ULLi = 0; tmp_Ui = 0; tmp_vp = 0;

#ifdef TEST_XSAVE
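// XSAVE saves the processor extended states selected by EDX:EAX (further
// masked by XCR0) to the pointed-to buffer; XRSTOR restores them. The *64
// builtins map to the REX.W encodings of the same instructions.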
// XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
// XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
// XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVE: call void @llvm.x86.xsave(ptr [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
  (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);

// XSAVE: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVE: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVE: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
// XSAVE: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
// XSAVE: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
// XSAVE: call void @llvm.x86.xsave64(ptr [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
  (void)__builtin_ia32_xsave64(tmp_vp, tmp_ULLi);

// XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
// XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
// XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVE: call void @llvm.x86.xrstor(ptr [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
  (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);

// XSAVE: [[tmp_vp_4:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVE: [[tmp_ULLi_4:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVE: [[high64_4:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_4]], 32
// XSAVE: [[high32_4:%[0-9a-zA-Z]+]] = trunc i64 [[high64_4]] to i32
// XSAVE: [[low32_4:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
// XSAVE: call void @llvm.x86.xrstor64(ptr [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
  (void)__builtin_ia32_xrstor64(tmp_vp, tmp_ULLi);

// XSAVE: call void @llvm.x86.xsave
  (void)_xsave(tmp_vp, tmp_ULLi);

// XSAVE: call void @llvm.x86.xsave64
  (void)_xsave64(tmp_vp, tmp_ULLi);

// XSAVE: call void @llvm.x86.xrstor
  (void)_xrstor(tmp_vp, tmp_ULLi);

// XSAVE: call void @llvm.x86.xrstor64
  (void)_xrstor64(tmp_vp, tmp_ULLi);
#endif

#ifdef TEST_XSAVEOPT
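// XSAVEOPT is XSAVE with the init and modified optimizations: state
// components in their initial configuration, or unmodified since the last
// XRSTOR, may be skipped. The mask lowering is the same as for xsave.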
// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
// XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
// XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEOPT: call void @llvm.x86.xsaveopt(ptr [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
  (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);

// XSAVEOPT: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVEOPT: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVEOPT: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
// XSAVEOPT: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
// XSAVEOPT: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
// XSAVEOPT: call void @llvm.x86.xsaveopt64(ptr [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
  (void)__builtin_ia32_xsaveopt64(tmp_vp, tmp_ULLi);

// XSAVEOPT: call void @llvm.x86.xsaveopt
  (void)_xsaveopt(tmp_vp, tmp_ULLi);

// XSAVEOPT: call void @llvm.x86.xsaveopt64
  (void)_xsaveopt64(tmp_vp, tmp_ULLi);
#endif

#ifdef TEST_XSAVEC
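// XSAVEC stores the state in the compacted format and applies the init
// optimization; at the IR level the mask split is identical to xsave.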
// XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
// XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
// XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEC: call void @llvm.x86.xsavec(ptr [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
  (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);

// XSAVEC: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVEC: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVEC: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
// XSAVEC: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
// XSAVEC: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
// XSAVEC: call void @llvm.x86.xsavec64(ptr [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
  (void)__builtin_ia32_xsavec64(tmp_vp, tmp_ULLi);

// XSAVEC: call void @llvm.x86.xsavec
  (void)_xsavec(tmp_vp, tmp_ULLi);

// XSAVEC: call void @llvm.x86.xsavec64
  (void)_xsavec64(tmp_vp, tmp_ULLi);
#endif

#ifdef TEST_XSAVES
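// XSAVES/XRSTORS additionally cover supervisor states and use the compacted
// format. The instructions are privileged, but the builtins still lower to
// plain intrinsic calls here.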
// XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
// XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
// XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVES: call void @llvm.x86.xsaves(ptr [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
  (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);

// XSAVES: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVES: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVES: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
// XSAVES: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
// XSAVES: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
// XSAVES: call void @llvm.x86.xsaves64(ptr [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
  (void)__builtin_ia32_xsaves64(tmp_vp, tmp_ULLi);

// XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
// XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
// XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVES: call void @llvm.x86.xrstors(ptr [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
  (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);

// XSAVES: [[tmp_vp_4:%[0-9a-zA-Z]+]] = load ptr, ptr %tmp_vp, align 8
// XSAVES: [[tmp_ULLi_4:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSAVES: [[high64_4:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_4]], 32
// XSAVES: [[high32_4:%[0-9a-zA-Z]+]] = trunc i64 [[high64_4]] to i32
// XSAVES: [[low32_4:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
// XSAVES: call void @llvm.x86.xrstors64(ptr [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
  (void)__builtin_ia32_xrstors64(tmp_vp, tmp_ULLi);

// XSAVES: call void @llvm.x86.xsaves
  (void)_xsaves(tmp_vp, tmp_ULLi);

// XSAVES: call void @llvm.x86.xsaves64
  (void)_xsaves64(tmp_vp, tmp_ULLi);

// XSAVES: call void @llvm.x86.xrstors
  (void)_xrstors(tmp_vp, tmp_ULLi);

// XSAVES: call void @llvm.x86.xrstors64
  (void)_xrstors64(tmp_vp, tmp_ULLi);
#endif

#ifdef TEST_XGETBV
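// XGETBV reads the extended control register selected by the i32 operand
// (e.g. XCR0); the intrinsic returns the 64-bit value directly as an i64.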
// XGETBV: [[tmp_Ui:%[0-9a-zA-Z]+]] = load i32, ptr %tmp_Ui, align 4
// XGETBV: call i64 @llvm.x86.xgetbv(i32 [[tmp_Ui]])
  tmp_ULLi = __builtin_ia32_xgetbv(tmp_Ui);

// XGETBV: call i64 @llvm.x86.xgetbv
  tmp_ULLi = _xgetbv(tmp_Ui);
#endif

#ifdef TEST_XSETBV
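// XSETBV writes a 64-bit value to the selected extended control register; the
// value is split into 32-bit halves just like the save/restore masks. The
// instruction itself is privileged.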
// XSETBV: [[tmp_Ui:%[0-9a-zA-Z]+]] = load i32, ptr %tmp_Ui, align 4
// XSETBV: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, ptr %tmp_ULLi, align 8
// XSETBV: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
// XSETBV: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
// XSETBV: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSETBV: call void @llvm.x86.xsetbv(i32 [[tmp_Ui]], i32 [[high32_3]], i32 [[low32_3]])
  (void)__builtin_ia32_xsetbv(tmp_Ui, tmp_ULLi);

// XSETBV: call void @llvm.x86.xsetbv
  (void)_xsetbv(tmp_Ui, tmp_ULLi);
#endif
}