; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=FUNC,GCN,SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=FUNC,GCN,VI %s
; RUN: llc -march=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=FUNC,GCN,GFX11 %s
; RUN: not llc -march=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope -check-prefixes=FUNC,R600 %s

; FUNC-LABEL: {{^}}s_fneg_f32:
; GCN: s_load_{{dword|b32}} [[VAL:s[0-9]+]]
; GCN: s_xor_b32 [[NEG_VAL:s[0-9]+]], [[VAL]], 0x80000000
; GCN: v_mov_b32_e32 v{{[0-9]+}}, [[NEG_VAL]]
define amdgpu_kernel void @s_fneg_f32(ptr addrspace(1) %out, float %in) {
  %fneg = fsub float -0.000000e+00, %in
  store float %fneg, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}s_fneg_v2f32:
; GCN: s_xor_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x80000000
; GCN: s_xor_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x80000000
define amdgpu_kernel void @s_fneg_v2f32(ptr addrspace(1) nocapture %out, <2 x float> %in) {
  %fneg = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %in
  store <2 x float> %fneg, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}s_fneg_v4f32:
; GCN: s_xor_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x80000000
; GCN: s_xor_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x80000000
; GCN: s_xor_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x80000000
; GCN: s_xor_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x80000000
define amdgpu_kernel void @s_fneg_v4f32(ptr addrspace(1) nocapture %out, <4 x float> %in) {
  %fneg = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %in
  store <4 x float> %fneg, ptr addrspace(1) %out
  ret void
}

; DAGCombiner will transform:
; (fneg (f32 bitcast (i32 a))) => (f32 bitcast (xor (i32 a), 0x80000000))
; unless the target returns true for isFNegFree()
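;
; Note that fsub with a +0.0 LHS is not a true negation: 0.0 - (+0.0) is +0.0,
; while fneg(+0.0) is -0.0. So the fsub0_f32 case below is expected to stay a
; real subtract rather than becoming a sign-bit xor.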

; FUNC-LABEL: {{^}}fsub0_f32:
; GCN: v_sub_f32_e64 v{{[0-9]}}, 0, s{{[0-9]+$}}
define amdgpu_kernel void @fsub0_f32(ptr addrspace(1) %out, i32 %in) {
  %bc = bitcast i32 %in to float
  %fsub = fsub float 0.0, %bc
  store float %fsub, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}fneg_free_f32:
; SI: s_load_dword [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
; VI: s_load_dword [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
; GFX11: s_load_b32 [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c

; GCN: s_xor_b32 [[RES:s[0-9]+]], [[NEG_VALUE]], 0x80000000
; GCN: v_mov_b32_e32 [[V_RES:v[0-9]+]], [[RES]]
; GCN: buffer_store_{{dword|b32}} [[V_RES]]
define amdgpu_kernel void @fneg_free_f32(ptr addrspace(1) %out, i32 %in) {
  %bc = bitcast i32 %in to float
  %fsub = fsub float -0.0, %bc
  store float %fsub, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}fneg_fold_f32:
; SI: s_load_dword [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
; VI: s_load_dword [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
; GFX11: s_load_b32 [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c

; GCN: v_mul_f32_e64 v{{[0-9]+}}, -[[NEG_VALUE]], [[NEG_VALUE]]
define amdgpu_kernel void @fneg_fold_f32(ptr addrspace(1) %out, float %in) {
  %fsub = fsub float -0.0, %in
  %fmul = fmul float %fsub, %in
  store float %fmul, ptr addrspace(1) %out
  ret void
}

; Make sure we turn some integer operations back into fneg
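; An xor of the bitcast value with 0x80000000 only flips the sign bit, so it
; should be recognized as an fneg and folded into the multiply as a negated
; immediate (-4.0) instead of being emitted as a separate xor.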
; FUNC-LABEL: {{^}}bitpreserve_fneg_f32:
; GCN: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -4.0
define amdgpu_kernel void @bitpreserve_fneg_f32(ptr addrspace(1) %out, float %in) {
  %in.bc = bitcast float %in to i32
  %int.neg = xor i32 %in.bc, 2147483648
  %bc = bitcast i32 %int.neg to float
  %fmul = fmul float %bc, 4.0
  store float %fmul, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}s_fneg_i32:
; GCN: s_load_{{dword|b32}} [[IN:s[0-9]+]]
; GCN: s_xor_b32 [[FNEG:s[0-9]+]], [[IN]], 0x80000000
; GCN: v_mov_b32_e32 [[V_FNEG:v[0-9]+]], [[FNEG]]
define amdgpu_kernel void @s_fneg_i32(ptr addrspace(1) %out, i32 %in) {
  %fneg = xor i32 %in, -2147483648
  store i32 %fneg, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_fneg_i32:
; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-NEXT: s_setpc_b64
define i32 @v_fneg_i32(i32 %in) {
  %fneg = xor i32 %in, -2147483648
  ret i32 %fneg
}

; FUNC-LABEL: {{^}}s_fneg_i32_fp_use:
; GCN: s_load_{{dword|b32}} [[IN:s[0-9]+]]
; GCN: v_sub_f32_e64 v{{[0-9]+}}, 2.0, [[IN]]
define amdgpu_kernel void @s_fneg_i32_fp_use(ptr addrspace(1) %out, i32 %in) {
  %fneg = xor i32 %in, -2147483648
  %bitcast = bitcast i32 %fneg to float
  %fadd = fadd float %bitcast, 2.0
  store float %fadd, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_fneg_i32_fp_use:
; GCN-NEXT: v_sub_f32_e32 v0, 2.0, v0
; GCN-NEXT: s_setpc_b64
define float @v_fneg_i32_fp_use(i32 %in) {
  %fneg = xor i32 %in, -2147483648
  %bitcast = bitcast i32 %fneg to float
  %fadd = fadd float %bitcast, 2.0
  ret float %fadd
}

; FUNC-LABEL: {{^}}s_fneg_i64:
; GCN: s_xor_b32 s[[NEG_HI:[0-9]+]], s{{[0-9]+}}, 0x80000000
define amdgpu_kernel void @s_fneg_i64(ptr addrspace(1) %out, i64 %in) {
  %fneg = xor i64 %in, -9223372036854775808
  store i64 %fneg, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_fneg_i64:
; GCN-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
; GCN-NEXT: s_setpc_b64
define i64 @v_fneg_i64(i64 %in) {
  %fneg = xor i64 %in, -9223372036854775808
  ret i64 %fneg
}
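
; Only the high 32 bits of the i64 hold the sign bit of the double, so when the
; result feeds an FP use the xor is expected to fold into a negated source
; operand on v_add_f64 instead of staying a separate xor.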
; FUNC-LABEL: {{^}}s_fneg_i64_fp_use:
; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, -s{{\[[0-9]+:[0-9]+\]}}, 2.0
define amdgpu_kernel void @s_fneg_i64_fp_use(ptr addrspace(1) %out, i64 %in) {
  %fneg = xor i64 %in, -9223372036854775808
  %bitcast = bitcast i64 %fneg to double
  %fadd = fadd double %bitcast, 2.0
  store double %fadd, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_fneg_i64_fp_use:
; GCN-NEXT: v_add_f64 v[0:1], -v[0:1], 2.0
; GCN-NEXT: s_setpc_b64
define double @v_fneg_i64_fp_use(i64 %in) {
  %fneg = xor i64 %in, -9223372036854775808
  %bitcast = bitcast i64 %fneg to double
  %fadd = fadd double %bitcast, 2.0
  ret double %fadd
}

; FUNC-LABEL: {{^}}v_fneg_i16:
; GCN-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0
; GCN-NEXT: s_setpc_b64
define i16 @v_fneg_i16(i16 %in) {
  %fneg = xor i16 %in, -32768
  ret i16 %fneg
}
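
; SI has no 16-bit floating-point instructions, so f16 arithmetic is promoted
; to f32 (v_cvt_f32_f16, the f32 op, then v_cvt_f16_f32 back), while VI can
; fold the negation into an f16 instruction directly.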
; FUNC-LABEL: {{^}}s_fneg_i16_fp_use:
; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], s{{[0-9]+}}
; SI: v_sub_f32_e32 [[ADD:v[0-9]+]], 2.0, [[CVT0]]
; SI: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], [[ADD]]

; VI: s_load_dword [[IN:s[0-9]+]]
; VI: v_sub_f16_e64 v{{[0-9]+}}, 2.0, [[IN]]
define amdgpu_kernel void @s_fneg_i16_fp_use(ptr addrspace(1) %out, i16 %in) {
  %fneg = xor i16 %in, -32768
  %bitcast = bitcast i16 %fneg to half
  %fadd = fadd half %bitcast, 2.0
  store half %fadd, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_fneg_i16_fp_use:
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_sub_f32_e32 v0, 2.0, v0
; SI-NEXT: s_setpc_b64

; VI-NEXT: v_sub_f16_e32 v0, 2.0, v0
; VI-NEXT: s_setpc_b64
define half @v_fneg_i16_fp_use(i16 %in) {
  %fneg = xor i16 %in, -32768
  %bitcast = bitcast i16 %fneg to half
  %fadd = fadd half %bitcast, 2.0
  ret half %fadd
}

; FUNC-LABEL: {{^}}s_fneg_v2i16:
; SI: s_xor_b32 s4, s4, 0x80008000

; VI: s_lshr_b32 s5, s4, 16
; VI: s_xor_b32 s4, s4, 0x8000
; VI: s_xor_b32 s5, s5, 0x8000
; VI: s_and_b32 s4, s4, 0xffff
; VI: s_lshl_b32 s5, s5, 16
; VI: s_or_b32 s4, s4, s5
define amdgpu_kernel void @s_fneg_v2i16(ptr addrspace(1) %out, i32 %arg) {
  %in = bitcast i32 %arg to <2 x i16>
  %fneg = xor <2 x i16> %in, <i16 -32768, i16 -32768>
  store <2 x i16> %fneg, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_fneg_v2i16:
; SI: v_xor_b32_e32 v1, 0x8000, v1
; SI: v_xor_b32_e32 v0, 0x8000, v0
; SI: v_lshlrev_b32_e32 v2, 16, v1
; SI: v_and_b32_e32 v0, 0xffff, v0
; SI: v_or_b32_e32 v0, v0, v2
; SI: v_and_b32_e32 v1, 0xffff, v1

; VI-NEXT: v_xor_b32_e32 v0, 0x80008000, v0
; VI-NEXT: s_setpc_b64
define <2 x i16> @v_fneg_v2i16(<2 x i16> %in) {
  %fneg = xor <2 x i16> %in, <i16 -32768, i16 -32768>
  ret <2 x i16> %fneg
}

; FUNC-LABEL: {{^}}s_fneg_v2i16_fp_use:
; SI: s_lshr_b32 s3, s2, 16
; SI: v_cvt_f32_f16_e32 v0, s3
; SI: v_cvt_f32_f16_e32 v1, s2
; SI: v_sub_f32_e32 v0, 2.0, v0
; SI: v_sub_f32_e32 v1, 2.0, v1

; VI: s_lshr_b32 s5, s4, 16
; VI: s_xor_b32 s5, s5, 0x8000
; VI: s_xor_b32 s4, s4, 0x8000
; VI: v_mov_b32_e32 v0, s5
; VI: v_add_f16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI: v_add_f16_e64 v1, s4, 2.0
; VI: v_or_b32_e32 v0, v1, v0
define amdgpu_kernel void @s_fneg_v2i16_fp_use(ptr addrspace(1) %out, i32 %arg) {
  %in = bitcast i32 %arg to <2 x i16>
  %fneg = xor <2 x i16> %in, <i16 -32768, i16 -32768>
  %bitcast = bitcast <2 x i16> %fneg to <2 x half>
  %fadd = fadd <2 x half> %bitcast, <half 2.0, half 2.0>
  store <2 x half> %fadd, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}v_fneg_v2i16_fp_use:
; SI: v_lshrrev_b32_e32 v1, 16, v0
; SI: v_cvt_f32_f16_e32 v0, v0
; SI: v_cvt_f32_f16_e32 v1, v1
; SI: v_sub_f32_e32 v0, 2.0, v0
; SI: v_sub_f32_e32 v1, 2.0, v1

; VI: v_mov_b32_e32 v1, 0x4000
; VI: v_sub_f16_sdwa v1, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI: v_sub_f16_e32 v0, 2.0, v0
; VI: v_or_b32_e32 v0, v0, v1
define <2 x half> @v_fneg_v2i16_fp_use(i32 %arg) {
  %in = bitcast i32 %arg to <2 x i16>
  %fneg = xor <2 x i16> %in, <i16 -32768, i16 -32768>
  %bitcast = bitcast <2 x i16> %fneg to <2 x half>
  %fadd = fadd <2 x half> %bitcast, <half 2.0, half 2.0>
  ret <2 x half> %fadd
}