; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s

declare half @llvm.amdgcn.sin.f16(half) #0
declare float @llvm.amdgcn.sin.f32(float) #0
declare double @llvm.amdgcn.sin.f64(double) #0
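
; llvm.amdgcn.sin takes its operand in units of 2*pi (turns), so an argument of
; 0.25 folds to 1.0 and integral arguments fold to 0.0. The tests below check
; that instsimplify constant folds calls with small constant arguments and
; leaves the calls in place for large magnitudes (+/-1000.0), infinities, NaN,
; and strictfp call sites.
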
define void @test_f16(ptr %p) {
; CHECK-LABEL: @test_f16(
; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P:%.*]], align 2
; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xH39A8, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xHB9A8, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xH3C00, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xHBC00, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
; CHECK-NEXT:    [[P1000:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH63D0)
; CHECK-NEXT:    store volatile half [[P1000]], ptr [[P]], align 2
; CHECK-NEXT:    [[N1000:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xHE3D0)
; CHECK-NEXT:    store volatile half [[N1000]], ptr [[P]], align 2
; CHECK-NEXT:    [[PINF:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH7C00)
; CHECK-NEXT:    store volatile half [[PINF]], ptr [[P]], align 2
; CHECK-NEXT:    [[NINF:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xHFC00)
; CHECK-NEXT:    store volatile half [[NINF]], ptr [[P]], align 2
; CHECK-NEXT:    [[NAN:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH7E00)
; CHECK-NEXT:    store volatile half [[NAN]], ptr [[P]], align 2
; CHECK-NEXT:    ret void
;
  %p0 = call half @llvm.amdgcn.sin.f16(half +0.0)
  store volatile half %p0, ptr %p
  %n0 = call half @llvm.amdgcn.sin.f16(half -0.0)
  store volatile half %n0, ptr %p
  %p0125 = call half @llvm.amdgcn.sin.f16(half +0.125)
  store volatile half %p0125, ptr %p
  %n0125 = call half @llvm.amdgcn.sin.f16(half -0.125)
  store volatile half %n0125, ptr %p
  %p025 = call half @llvm.amdgcn.sin.f16(half +0.25)
  store volatile half %p025, ptr %p
  %n025 = call half @llvm.amdgcn.sin.f16(half -0.25)
  store volatile half %n025, ptr %p
  %p05 = call half @llvm.amdgcn.sin.f16(half +0.5)
  store volatile half %p05, ptr %p
  %n05 = call half @llvm.amdgcn.sin.f16(half -0.5)
  store volatile half %n05, ptr %p
  %p1 = call half @llvm.amdgcn.sin.f16(half +1.0)
  store volatile half %p1, ptr %p
  %n1 = call half @llvm.amdgcn.sin.f16(half -1.0)
  store volatile half %n1, ptr %p
  %p256 = call half @llvm.amdgcn.sin.f16(half +256.0)
  store volatile half %p256, ptr %p
  %n256 = call half @llvm.amdgcn.sin.f16(half -256.0)
  store volatile half %n256, ptr %p
  %p1000 = call half @llvm.amdgcn.sin.f16(half +1000.0)
  store volatile half %p1000, ptr %p
  %n1000 = call half @llvm.amdgcn.sin.f16(half -1000.0)
  store volatile half %n1000, ptr %p
  %pinf = call half @llvm.amdgcn.sin.f16(half 0xH7C00) ; +inf
  store volatile half %pinf, ptr %p
  %ninf = call half @llvm.amdgcn.sin.f16(half 0xHFC00) ; -inf
  store volatile half %ninf, ptr %p
  %nan = call half @llvm.amdgcn.sin.f16(half 0xH7E00) ; nan
  store volatile half %nan, ptr %p
  ret void
}

define void @test_f32(ptr %p) {
; CHECK-LABEL: @test_f32(
; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P:%.*]], align 4
; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float 0x3FE6A09E60000000, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float 0xBFE6A09E60000000, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float -1.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    [[P1000:%.*]] = call float @llvm.amdgcn.sin.f32(float 1.000000e+03)
; CHECK-NEXT:    store volatile float [[P1000]], ptr [[P]], align 4
; CHECK-NEXT:    [[N1000:%.*]] = call float @llvm.amdgcn.sin.f32(float -1.000000e+03)
; CHECK-NEXT:    store volatile float [[N1000]], ptr [[P]], align 4
; CHECK-NEXT:    [[PINF:%.*]] = call float @llvm.amdgcn.sin.f32(float 0x7FF0000000000000)
; CHECK-NEXT:    store volatile float [[PINF]], ptr [[P]], align 4
; CHECK-NEXT:    [[NINF:%.*]] = call float @llvm.amdgcn.sin.f32(float 0xFFF0000000000000)
; CHECK-NEXT:    store volatile float [[NINF]], ptr [[P]], align 4
; CHECK-NEXT:    [[NAN:%.*]] = call float @llvm.amdgcn.sin.f32(float 0x7FF8000000000000)
; CHECK-NEXT:    store volatile float [[NAN]], ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  %p0 = call float @llvm.amdgcn.sin.f32(float +0.0)
  store volatile float %p0, ptr %p
  %n0 = call float @llvm.amdgcn.sin.f32(float -0.0)
  store volatile float %n0, ptr %p
  %p0125 = call float @llvm.amdgcn.sin.f32(float +0.125)
  store volatile float %p0125, ptr %p
  %n0125 = call float @llvm.amdgcn.sin.f32(float -0.125)
  store volatile float %n0125, ptr %p
  %p025 = call float @llvm.amdgcn.sin.f32(float +0.25)
  store volatile float %p025, ptr %p
  %n025 = call float @llvm.amdgcn.sin.f32(float -0.25)
  store volatile float %n025, ptr %p
  %p05 = call float @llvm.amdgcn.sin.f32(float +0.5)
  store volatile float %p05, ptr %p
  %n05 = call float @llvm.amdgcn.sin.f32(float -0.5)
  store volatile float %n05, ptr %p
  %p1 = call float @llvm.amdgcn.sin.f32(float +1.0)
  store volatile float %p1, ptr %p
  %n1 = call float @llvm.amdgcn.sin.f32(float -1.0)
  store volatile float %n1, ptr %p
  %p256 = call float @llvm.amdgcn.sin.f32(float +256.0)
  store volatile float %p256, ptr %p
  %n256 = call float @llvm.amdgcn.sin.f32(float -256.0)
  store volatile float %n256, ptr %p
  %p1000 = call float @llvm.amdgcn.sin.f32(float +1000.0)
  store volatile float %p1000, ptr %p
  %n1000 = call float @llvm.amdgcn.sin.f32(float -1000.0)
  store volatile float %n1000, ptr %p
  %pinf = call float @llvm.amdgcn.sin.f32(float 0x7FF0000000000000) ; +inf
  store volatile float %pinf, ptr %p
  %ninf = call float @llvm.amdgcn.sin.f32(float 0xFFF0000000000000) ; -inf
  store volatile float %ninf, ptr %p
  %nan = call float @llvm.amdgcn.sin.f32(float 0x7FF8000000000000) ; nan
  store volatile float %nan, ptr %p
  ret void
}

define void @test_f64(ptr %p) {
; CHECK-LABEL: @test_f64(
; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P:%.*]], align 8
; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double 0x3FE6A09E667F3B{{.*}}, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double 0xBFE6A09E667F3B{{.*}}, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double 1.000000e+00, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double -1.000000e+00, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
; CHECK-NEXT:    [[P1000:%.*]] = call double @llvm.amdgcn.sin.f64(double 1.000000e+03)
; CHECK-NEXT:    store volatile double [[P1000]], ptr [[P]], align 8
; CHECK-NEXT:    [[N1000:%.*]] = call double @llvm.amdgcn.sin.f64(double -1.000000e+03)
; CHECK-NEXT:    store volatile double [[N1000]], ptr [[P]], align 8
; CHECK-NEXT:    [[PINF:%.*]] = call double @llvm.amdgcn.sin.f64(double 0x7FF0000000000000)
; CHECK-NEXT:    store volatile double [[PINF]], ptr [[P]], align 8
; CHECK-NEXT:    [[NINF:%.*]] = call double @llvm.amdgcn.sin.f64(double 0xFFF0000000000000)
; CHECK-NEXT:    store volatile double [[NINF]], ptr [[P]], align 8
; CHECK-NEXT:    [[NAN:%.*]] = call double @llvm.amdgcn.sin.f64(double 0x7FF8000000000000)
; CHECK-NEXT:    store volatile double [[NAN]], ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
  %p0 = call double @llvm.amdgcn.sin.f64(double +0.0)
  store volatile double %p0, ptr %p
  %n0 = call double @llvm.amdgcn.sin.f64(double -0.0)
  store volatile double %n0, ptr %p
  %p0125 = call double @llvm.amdgcn.sin.f64(double +0.125)
  store volatile double %p0125, ptr %p
  %n0125 = call double @llvm.amdgcn.sin.f64(double -0.125)
  store volatile double %n0125, ptr %p
  %p025 = call double @llvm.amdgcn.sin.f64(double +0.25)
  store volatile double %p025, ptr %p
  %n025 = call double @llvm.amdgcn.sin.f64(double -0.25)
  store volatile double %n025, ptr %p
  %p05 = call double @llvm.amdgcn.sin.f64(double +0.5)
  store volatile double %p05, ptr %p
  %n05 = call double @llvm.amdgcn.sin.f64(double -0.5)
  store volatile double %n05, ptr %p
  %p1 = call double @llvm.amdgcn.sin.f64(double +1.0)
  store volatile double %p1, ptr %p
  %n1 = call double @llvm.amdgcn.sin.f64(double -1.0)
  store volatile double %n1, ptr %p
  %p256 = call double @llvm.amdgcn.sin.f64(double +256.0)
  store volatile double %p256, ptr %p
  %n256 = call double @llvm.amdgcn.sin.f64(double -256.0)
  store volatile double %n256, ptr %p
  %p1000 = call double @llvm.amdgcn.sin.f64(double +1000.0)
  store volatile double %p1000, ptr %p
  %n1000 = call double @llvm.amdgcn.sin.f64(double -1000.0)
  store volatile double %n1000, ptr %p
  %pinf = call double @llvm.amdgcn.sin.f64(double 0x7FF0000000000000) ; +inf
  store volatile double %pinf, ptr %p
  %ninf = call double @llvm.amdgcn.sin.f64(double 0xFFF0000000000000) ; -inf
  store volatile double %ninf, ptr %p
  %nan = call double @llvm.amdgcn.sin.f64(double 0x7FF8000000000000) ; nan
  store volatile double %nan, ptr %p
  ret void
}
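
; The strictfp variants check that instsimplify does not constant fold the
; calls when the call sites carry the strictfp attribute, even for arguments
; that fold in the tests above.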
define void @test_f16_strictfp(ptr %p) #1 {
; CHECK-LABEL: @test_f16_strictfp(
; CHECK-NEXT:    [[P0:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH0000) #1
; CHECK-NEXT:    store volatile half [[P0]], ptr [[P:%.*]], align 2
; CHECK-NEXT:    [[P025:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH3400) #1
; CHECK-NEXT:    store volatile half [[P025]], ptr [[P]], align 2
; CHECK-NEXT:    ret void
;
  %p0 = call half @llvm.amdgcn.sin.f16(half +0.0) #1
  store volatile half %p0, ptr %p
  %p025 = call half @llvm.amdgcn.sin.f16(half +0.25) #1
  store volatile half %p025, ptr %p
  ret void
}

define void @test_f32_strictfp(ptr %p) #1 {
; CHECK-LABEL: @test_f32_strictfp(
; CHECK-NEXT:    [[P0:%.*]] = call float @llvm.amdgcn.sin.f32(float 0.000000e+00) #1
; CHECK-NEXT:    store volatile float [[P0]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[P025:%.*]] = call float @llvm.amdgcn.sin.f32(float 2.500000e-01) #1
; CHECK-NEXT:    store volatile float [[P025]], ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  %p0 = call float @llvm.amdgcn.sin.f32(float +0.0) #1
  store volatile float %p0, ptr %p
  %p025 = call float @llvm.amdgcn.sin.f32(float +0.25) #1
  store volatile float %p025, ptr %p
  ret void
}

define void @test_f64_strictfp(ptr %p) #1 {
; CHECK-LABEL: @test_f64_strictfp(
; CHECK-NEXT:    [[P0:%.*]] = call double @llvm.amdgcn.sin.f64(double 0.000000e+00) #1
; CHECK-NEXT:    store volatile double [[P0]], ptr [[P:%.*]], align 8
; CHECK-NEXT:    [[P025:%.*]] = call double @llvm.amdgcn.sin.f64(double 2.500000e-01) #1
; CHECK-NEXT:    store volatile double [[P025]], ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
  %p0 = call double @llvm.amdgcn.sin.f64(double +0.0) #1
  store volatile double %p0, ptr %p
  %p025 = call double @llvm.amdgcn.sin.f64(double +0.25) #1
  store volatile double %p025, ptr %p
  ret void
}
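
; Attribute group #0 applies to the intrinsic declarations above; #1 is the
; strictfp attribute used by the *_strictfp tests.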
attributes #0 = { nounwind readnone speculatable }
attributes #1 = { strictfp }