1 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI,FUNC %s
2 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,FUNC %s
4 declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
5 declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
7 ; FUNC-LABEL: {{^}}test_umul24_i32:
define amdgpu_kernel void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  ; shl+lshr by 8 masks each operand to its low 24 bits, which is what
  ; allows selection of v_mul_u32_u24.
  %0 = shl i32 %a, 8
  %a_24 = lshr i32 %0, 8
  %1 = shl i32 %b, 8
  %b_24 = lshr i32 %1, 8
  %2 = mul i32 %a_24, %b_24
  store i32 %2, i32 addrspace(1)* %out
  ret void
}
20 ; FUNC-LABEL: {{^}}test_umul24_i16_sext:
21 ; SI: v_mul_u32_u24_e{{(32|64)}} [[VI_MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
22 ; SI: v_bfe_i32 v{{[0-9]}}, [[VI_MUL]], 0, 16
24 ; VI: s_mul_i32 [[MUL:s[0-9]+]]
25 ; VI: s_sext_i32_i16 s{{[0-9]+}}, [[MUL]]
define amdgpu_kernel void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
  ; i16 multiply whose result is sign-extended to i32.
  %mul = mul i16 %a, %b
  %ext = sext i16 %mul to i32
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}
34 ; FUNC-LABEL: {{^}}test_umul24_i16_vgpr_sext:
35 ; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
36 ; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
37 ; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 16
define amdgpu_kernel void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
  ; Loads indexed by workitem ids keep the operands in VGPRs.
  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
  %ptr_a = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.x
  %ptr_b = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.y
  %a = load i16, i16 addrspace(1)* %ptr_a
  %b = load i16, i16 addrspace(1)* %ptr_b
  %mul = mul i16 %a, %b
  %val = sext i16 %mul to i32
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
51 ; FUNC-LABEL: {{^}}test_umul24_i16:
53 ; SI: v_mul_u32_u24_e32
define amdgpu_kernel void @test_umul24_i16(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
  ; i16 multiply whose result is zero-extended to i32.
  %mul = mul i16 %a, %b
  %ext = zext i16 %mul to i32
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}
66 ; FUNC-LABEL: {{^}}test_umul24_i16_vgpr:
67 ; SI: v_mul_u32_u24_e32
define amdgpu_kernel void @test_umul24_i16_vgpr(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
  ; Loads indexed by workitem ids keep the operands in VGPRs.
  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
  %ptr_a = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.x
  %ptr_b = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.y
  %a = load i16, i16 addrspace(1)* %ptr_a
  %b = load i16, i16 addrspace(1)* %ptr_b
  %mul = mul i16 %a, %b
  %val = zext i16 %mul to i32
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
83 ; FUNC-LABEL: {{^}}test_umul24_i8_vgpr:
84 ; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
85 ; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
86 ; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
define amdgpu_kernel void @test_umul24_i8_vgpr(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b) {
entry:
  ; i8 multiply with VGPR operands, result sign-extended to i32.
  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
  %a.ptr = getelementptr i8, i8 addrspace(1)* %a, i32 %tid.x
  %b.ptr = getelementptr i8, i8 addrspace(1)* %b, i32 %tid.y
  %a.l = load i8, i8 addrspace(1)* %a.ptr
  %b.l = load i8, i8 addrspace(1)* %b.ptr
  %mul = mul i8 %a.l, %b.l
  %ext = sext i8 %mul to i32
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}
101 ; FUNC-LABEL: {{^}}test_umulhi24_i32_i64:
103 ; GCN: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
104 ; GCN-NEXT: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  ; Mask both operands to 24 bits, widen, multiply, and keep only the high
  ; 32 bits of the 48-bit product -> v_mul_hi_u32_u24.
  %a.24 = and i32 %a, 16777215
  %b.24 = and i32 %b, 16777215
  %a.24.i64 = zext i32 %a.24 to i64
  %b.24.i64 = zext i32 %b.24 to i64
  %mul48 = mul i64 %a.24.i64, %b.24.i64
  %mul48.hi = lshr i64 %mul48, 32
  %mul24hi = trunc i64 %mul48.hi to i32
  store i32 %mul24hi, i32 addrspace(1)* %out
  ret void
}
118 ; FUNC-LABEL: {{^}}test_umulhi24:
120 ; GCN: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
121 ; GCN-NEXT: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
  ; Same as test_umulhi24_i32_i64 but the masking is done directly in i64.
  %a.24 = and i64 %a, 16777215
  %b.24 = and i64 %b, 16777215
  %mul48 = mul i64 %a.24, %b.24
  %mul48.hi = lshr i64 %mul48, 32
  %mul24.hi = trunc i64 %mul48.hi to i32
  store i32 %mul24.hi, i32 addrspace(1)* %out
  ret void
}
133 ; Multiply with 24-bit inputs and 64-bit output.
134 ; FUNC-LABEL: {{^}}test_umul24_i64:
137 ; GCN-DAG: v_mul_u32_u24_e32
138 ; GCN-DAG: v_mul_hi_u32_u24_e32
139 ; GCN: buffer_store_dwordx2
define amdgpu_kernel void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
  ; shl+lshr by 40 masks each i64 operand to its low 24 bits; full 64-bit
  ; result needs both mul_u32_u24 and mul_hi_u32_u24.
  %tmp0 = shl i64 %a, 40
  %a_24 = lshr i64 %tmp0, 40
  %tmp1 = shl i64 %b, 40
  %b_24 = lshr i64 %tmp1, 40
  %tmp2 = mul i64 %a_24, %b_24
  store i64 %tmp2, i64 addrspace(1)* %out
  ret void
}
151 ; FUNC-LABEL: {{^}}test_umul24_i64_square:
152 ; GCN: s_load_dword [[A:s[0-9]+]]
154 ; GCN-DAG: v_mul_hi_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
155 ; GCN-DAG: v_mul_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
define amdgpu_kernel void @test_umul24_i64_square(i64 addrspace(1)* %out, [8 x i32], i64 %a) {
entry:
  ; Squaring a single masked operand: both mul operands are the same SGPR.
  %tmp0 = shl i64 %a, 40
  %a.24 = lshr i64 %tmp0, 40
  %tmp2 = mul i64 %a.24, %a.24
  store i64 %tmp2, i64 addrspace(1)* %out
  ret void
}
165 ; FUNC-LABEL: {{^}}test_umulhi16_i32:
168 ; GCN: v_mul_u32_u24_e32 [[MUL24:v[0-9]+]]
169 ; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[MUL24]]
define amdgpu_kernel void @test_umulhi16_i32(i16 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  ; 16-bit operands fit in 24 bits, so the 32-bit product can use
  ; v_mul_u32_u24 followed by a shift to extract the high half.
  %a.16 = and i32 %a, 65535
  %b.16 = and i32 %b, 65535
  %mul = mul i32 %a.16, %b.16
  %hi = lshr i32 %mul, 16
  %mulhi = trunc i32 %hi to i16
  store i16 %mulhi, i16 addrspace(1)* %out
  ret void
}
181 ; FUNC-LABEL: {{^}}test_umul24_i33:
182 ; GCN: s_load_dword s
183 ; GCN: s_load_dword s
186 ; GCN-DAG: v_mul_u32_u24_e32 v[[MUL_LO:[0-9]+]],
187 ; GCN-DAG: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
188 ; GCN-DAG: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
189 ; GCN: buffer_store_dwordx2 v{{\[}}[[MUL_LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) {
entry:
  ; shl+lshr by 9 masks each i33 operand to its low 24 bits; only bit 0 of
  ; the high result word is meaningful after the zext to i64.
  %tmp0 = shl i33 %a, 9
  %a_24 = lshr i33 %tmp0, 9
  %tmp1 = shl i33 %b, 9
  %b_24 = lshr i33 %tmp1, 9
  %tmp2 = mul i33 %a_24, %b_24
  %ext = zext i33 %tmp2 to i64
  store i64 %ext, i64 addrspace(1)* %out
  ret void
}
202 ; FUNC-LABEL: {{^}}test_umulhi24_i33:
203 ; GCN: s_load_dword s
204 ; GCN: s_load_dword s
207 ; GCN: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
208 ; GCN-NEXT: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
209 ; GCN-NEXT: buffer_store_dword v[[HI]]
define amdgpu_kernel void @test_umulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
entry:
  ; High part of an i33 24-bit multiply: only bit 32 survives the shift,
  ; hence the and-with-1 on the mul_hi result.
  %tmp0 = shl i33 %a, 9
  %a_24 = lshr i33 %tmp0, 9
  %tmp1 = shl i33 %b, 9
  %b_24 = lshr i33 %tmp1, 9
  %tmp2 = mul i33 %a_24, %b_24
  %hi = lshr i33 %tmp2, 32
  %trunc = trunc i33 %hi to i32
  store i32 %trunc, i32 addrspace(1)* %out
  ret void
}
224 ; Make sure the created any_extend is ignored to use the real bits
227 ; GCN-LABEL: {{^}}test_umul24_anyextend_i24_src0_src1:
228 ; GCN-DAG: v_mul_u32_u24_e32 v0, 0xea, v0
229 ; GCN-DAG: v_mul_u32_u24_e32 v1, 0x39b, v1
230 ; GCN: v_mul_u32_u24_e32 v0, v0, v1
231 ; GCN: v_and_b32_e32 v0, 0x1fffe, v0
232 ; GCN: v_mul_u32_u24_e32 v0, 0x63, v0
define i17 @test_umul24_anyextend_i24_src0_src1(i24 %a, i24 %b) {
bb:
  ; Both zext sources are i24, so the any_extends created during lowering
  ; must not be assumed to carry defined high bits.
  %aa = mul i24 %a, 234
  %bb = mul i24 %b, 923
  %a_32 = zext i24 %aa to i32
  %b_32 = zext i24 %bb to i32
  %mul = mul i32 %a_32, %b_32
  %trunc = trunc i32 %mul to i17
  %arst = mul i17 %trunc, 99
  ret i17 %arst
}
246 ; GCN-LABEL: {{^}}test_umul24_anyextend_i23_src0_src1:
247 ; GCN: s_mov_b32 [[U23_MASK:s[0-9]+]], 0x7fffff
248 ; GCN-DAG: v_and_b32_e32 v0, [[U23_MASK]], v0
249 ; GCN-DAG: v_and_b32_e32 v1, [[U23_MASK]], v1
250 ; GCN-DAG: v_mul_u32_u24_e32 v0, 0xea, v0
251 ; GCN-DAG: v_mul_u32_u24_e32 v1, 0x39b, v1
252 ; GCN-DAG: v_and_b32_e32 v1, s4, v1
253 ; GCN-DAG: v_and_b32_e32 v0, 0x7ffffe, v0
254 ; GCN: v_mul_u32_u24_e32 v0, v0, v1
255 ; GCN: v_and_b32_e32 v0, 0x1fffe, v0
256 ; GCN: v_mul_u32_u24_e32 v0, 0x63, v0
258 define i17 @test_umul24_anyextend_i23_src0_src1(i23 %a, i23 %b) {
260 %aa = mul i23 %a, 234
261 %bb = mul i23 %b, 923
262 %a_32 = zext i23 %aa to i32
263 %b_32 = zext i23 %bb to i32
264 %mul = mul i32 %a_32, %b_32
265 %trunc = trunc i32 %mul to i17
266 %arst = mul i17 %trunc, 99