; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=FUNC,GFX9_10 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=FUNC,GFX9_10 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=EG,FUNC %s

; mul24 and mad24 are affected
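; (Note: i32 multiplies whose operands are known to fit in 24 bits can be
; selected as v_mul_u32_u24 / v_mad_u32_u24 instead, so changes to the
; combines exercised here can also affect those mul24/mad24 patterns.)
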
; FUNC-LABEL: {{^}}test_mul_v2i32:
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
  %result = mul <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_mul_v4i32:
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
  %result = mul <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_trunc_i64_mul_to_i32:
; GCN: buffer_store_dword
define amdgpu_kernel void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
  %mul = mul i64 %a, %b
  %trunc = trunc i64 %mul to i32
  store i32 %trunc, i32 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_trunc_i64_mul_to_i32:
; GCN: buffer_store_dword
define amdgpu_kernel void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %mul = mul i64 %a, %b
  %trunc = trunc i64 %mul to i32
  store i32 %trunc, i32 addrspace(1)* %out, align 8
  ret void
}

; This 64-bit multiply should just use MUL_HI and MUL_LO, since the top
; 32 bits of both arguments are sign bits.
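; (Sketch, not a check pattern: for a = sext(a32) and b = sext(b32), the
; mathematical product a32*b32 already fits in 64 bits, so
;   lo32(a*b) = mul_lo_u32(a32, b32)
;   hi32(a*b) = mul_hi_i32(a32, b32)
; and no 64-bit add chain is needed.)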
; FUNC-LABEL: {{^}}mul64_sext_c:
; SI-DAG: v_mul_hi_i32
define amdgpu_kernel void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
entry:
  %0 = sext i32 %in to i64
  %1 = mul i64 %0, 80 ; same constant as v_mul64_sext_c below
  store i64 %1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_mul64_sext_c:
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_hi_i32
define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %val = load i32, i32 addrspace(1)* %in, align 4
  %ext = sext i32 %val to i64
  %mul = mul i64 %ext, 80
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_mul64_sext_inline_imm:
; SI-DAG: v_mul_lo_u32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; SI-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; VI: v_mad_i64_i32 v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}, 9, 0
define amdgpu_kernel void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %val = load i32, i32 addrspace(1)* %in, align 4
  %ext = sext i32 %val to i64
  %mul = mul i64 %ext, 9
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_mul_i32:
; GCN: s_load_dword [[SRC0:s[0-9]+]],
; GCN: s_load_dword [[SRC1:s[0-9]+]],
; GCN: s_mul_i32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; GCN: buffer_store_dword [[VRESULT]],
define amdgpu_kernel void @s_mul_i32(i32 addrspace(1)* %out, [8 x i32], i32 %a, [8 x i32], i32 %b) nounwind {
  %mul = mul i32 %a, %b
  store i32 %mul, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_mul_i32:
; GCN: v_mul_lo_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
  %a = load i32, i32 addrspace(1)* %in
  %b = load i32, i32 addrspace(1)* %b_ptr
  %result = mul i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; A standard 64-bit multiply. The expansion should be around 6 instructions.
; It would be difficult to match the expansion correctly without writing
; a really complicated list of FileCheck expressions. I don't want
; to confuse people who may 'break' this test with a correct optimization,
; so this test just uses FUNC-LABEL to make sure the compiler does not
; crash with a 'failed to select' error.
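; (For reference, a sketch of the expansion rather than a check pattern:
; with a = a.lo + 2^32*a.hi and b = b.lo + 2^32*b.hi,
;   a*b mod 2^64 = a.lo*b.lo + 2^32*(a.lo*b.hi + a.hi*b.lo)   (mod 2^64)
; i.e. one full 32x32->64 multiply for the low halves plus two more 32-bit
; multiplies feeding the high word, which is where the s_mul_i32 /
; s_mul_hi_u32 instructions checked below come from.)
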
; FUNC-LABEL: {{^}}s_mul_i64:
; GFX9_10-DAG: s_mul_i32
; GFX9_10-DAG: s_mul_hi_u32
; GFX9_10-DAG: s_mul_i32
; GFX9_10-DAG: s_mul_i32
define amdgpu_kernel void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
  %mul = mul i64 %a, %b
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_mul_i64:
define amdgpu_kernel void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %mul = mul i64 %a, %b
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}mul32_in_branch:
define amdgpu_kernel void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
entry:
  %0 = icmp eq i32 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i32, i32 addrspace(1)* %in
  br label %endif

else:
  %2 = mul i32 %a, %b
  br label %endif

endif:
  %3 = phi i32 [%1, %if], [%2, %else]
  store i32 %3, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}mul64_in_branch:
; SI-DAG: v_mul_hi_u32
define amdgpu_kernel void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i64, i64 addrspace(1)* %in
  br label %endif

else:
  %2 = mul i64 %a, %b
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, i64 addrspace(1)* %out
  ret void
}

; FIXME: Load dwordx4
; FUNC-LABEL: {{^}}s_mul_i128:
; GCN: s_load_dwordx4
; GCN: s_load_dwordx4

; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32

; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32

; GCN: buffer_store_dwordx4
define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, [8 x i32], i128 %a, [8 x i32], i128 %b) nounwind #0 {
  %mul = mul i128 %a, %b
  store i128 %mul, i128 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_mul_i128:
; GCN: {{buffer|flat}}_load_dwordx4
; GCN: {{buffer|flat}}_load_dwordx4

; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_add_i32_e32

; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32

; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mul_lo_u32
; VI-DAG: v_mul_lo_u32
; VI-DAG: v_mul_lo_u32

; GCN: {{buffer|flat}}_store_dwordx4
define amdgpu_kernel void @v_mul_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %aptr, i128 addrspace(1)* %bptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.a = getelementptr inbounds i128, i128 addrspace(1)* %aptr, i32 %tid
  %gep.b = getelementptr inbounds i128, i128 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr inbounds i128, i128 addrspace(1)* %out, i32 %tid
  %a = load i128, i128 addrspace(1)* %gep.a
  %b = load i128, i128 addrspace(1)* %gep.b
  %mul = mul i128 %a, %b
  store i128 %mul, i128 addrspace(1)* %gep.out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }