; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 -verify-machineinstrs < %s | FileCheck %s --check-prefix=W32

declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16(<16 x half>, <16 x half>, <8 x float>)
declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16(<16 x i16>, <16 x i16>, <8 x float>)
declare <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half>, <16 x half>, <16 x half>, i1 immarg)
declare <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.tied(<16 x half>, <16 x half>, <16 x half>, i1 immarg)
declare <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16>, <16 x i16>, <16 x i16>, i1 immarg)
declare <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied(<16 x i16>, <16 x i16>, <16 x i16>, i1 immarg)
declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 immarg, <4 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i1 immarg)
declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg)

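; The tests below check the wave32 WMMA instruction(s) selected for each
; intrinsic, together with the global_store_b128 instructions that write out
; the 32-byte accumulator results.
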
; @llvm.amdgcn.wmma.f32.16x16x16.f16

define amdgpu_ps void @test_wmma_f32_16x16x16_f16(<16 x half> %A, <16 x half> %B, <8 x float> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_f32_16x16x16_f16:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_f32_16x16x16_f16 v[16:23], v[0:7], v[8:15], v[16:23]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[24:25], v[20:23], off offset:16
; W32-NEXT:    global_store_b128 v[24:25], v[16:19], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16(<16 x half> %A, <16 x half> %B, <8 x float> %C)
  store <8 x float> %res, ptr addrspace(1) %out, align 32
  ret void
}

; @llvm.amdgcn.wmma.f32.16x16x16.bf16

define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<16 x i16> %A, <16 x i16> %B, <8 x float> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_f32_16x16x16_bf16:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_f32_16x16x16_bf16 v[16:23], v[0:7], v[8:15], v[16:23]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[24:25], v[20:23], off offset:16
; W32-NEXT:    global_store_b128 v[24:25], v[16:19], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16(<16 x i16> %A, <16 x i16> %B, <8 x float> %C)
  store <8 x float> %res, ptr addrspace(1) %out, align 32
  ret void
}

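; For the f16/bf16 accumulator variants below, the trailing i1 immarg selects
; whether the result is packed into the low half (0) or the high half (1) of
; each 32-bit lane of the accumulator, which llc encodes as op_sel:[0,0,1] on
; the generated v_wmma instruction.
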
; @llvm.amdgcn.wmma.f16.16x16x16.f16

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_lo(<16 x half> %A, <16 x half> %B, <16 x half> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_f16_16x16x16_f16_lo:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_f16_16x16x16_f16 v[16:23], v[0:7], v[8:15], v[16:23]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[24:25], v[20:23], off offset:16
; W32-NEXT:    global_store_b128 v[24:25], v[16:19], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %A, <16 x half> %B, <16 x half> %C, i1 0)
  store <16 x half> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_hi(<16 x half> %A, <16 x half> %B, <16 x half> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_f16_16x16x16_f16_hi:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_f16_16x16x16_f16 v[16:23], v[0:7], v[8:15], v[16:23] op_sel:[0,0,1]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[24:25], v[20:23], off offset:16
; W32-NEXT:    global_store_b128 v[24:25], v[16:19], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %A, <16 x half> %B, <16 x half> %C, i1 1)
  store <16 x half> %res, ptr addrspace(1) %out, align 32
  ret void
}

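; The "untied" tests check that two WMMAs sharing accumulator %C may write
; their results into fresh register tuples.  The ".tied" variants keep the
; destination tied to the accumulator, so the compiler is expected to copy %C
; first (the v_dual_mov_b32 sequence) to give each WMMA its own accumulator
; registers.
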
define amdgpu_ps void @test_wmma_f16_16x16x16_f16_untied(<16 x half> %A.0, <16 x half> %B.0, <16 x half> %A.1, <16 x half> %B.1, <16 x half> %C, ptr addrspace(1) %out.0, ptr addrspace(1) %out.1) {
; W32-LABEL: test_wmma_f16_16x16x16_f16_untied:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_f16_16x16x16_f16 v[44:51], v[0:7], v[8:15], v[32:39]
; W32-NEXT:    v_wmma_f16_16x16x16_f16 v[32:39], v[16:23], v[24:31], v[32:39]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[40:41], v[48:51], off offset:16
; W32-NEXT:    global_store_b128 v[40:41], v[44:47], off
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[42:43], v[36:39], off offset:16
; W32-NEXT:    global_store_b128 v[42:43], v[32:35], off
; W32-NEXT:    s_endpgm
bb:
  %res.0 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %A.0, <16 x half> %B.0, <16 x half> %C, i1 0)
  %res.1 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %A.1, <16 x half> %B.1, <16 x half> %C, i1 0)
  store <16 x half> %res.0, ptr addrspace(1) %out.0, align 32
  store <16 x half> %res.1, ptr addrspace(1) %out.1, align 32
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_tied(<16 x half> %A.0, <16 x half> %B.0, <16 x half> %A.1, <16 x half> %B.1, <16 x half> %C, ptr addrspace(1) %out.0, ptr addrspace(1) %out.1) {
; W32-LABEL: test_wmma_f16_16x16x16_f16_tied:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_dual_mov_b32 v51, v39 :: v_dual_mov_b32 v50, v38
; W32-NEXT:    v_dual_mov_b32 v49, v37 :: v_dual_mov_b32 v48, v36
; W32-NEXT:    v_dual_mov_b32 v47, v35 :: v_dual_mov_b32 v46, v34
; W32-NEXT:    v_dual_mov_b32 v45, v33 :: v_dual_mov_b32 v44, v32
; W32-NEXT:    v_wmma_f16_16x16x16_f16 v[32:39], v[16:23], v[24:31], v[32:39]
; W32-NEXT:    s_delay_alu instid0(VALU_DEP_2)
; W32-NEXT:    v_wmma_f16_16x16x16_f16 v[44:51], v[0:7], v[8:15], v[44:51]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[40:41], v[48:51], off offset:16
; W32-NEXT:    global_store_b128 v[40:41], v[44:47], off
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[42:43], v[36:39], off offset:16
; W32-NEXT:    global_store_b128 v[42:43], v[32:35], off
; W32-NEXT:    s_endpgm
bb:
  %res.0 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.tied(<16 x half> %A.0, <16 x half> %B.0, <16 x half> %C, i1 0)
  %res.1 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.tied(<16 x half> %A.1, <16 x half> %B.1, <16 x half> %C, i1 0)
  store <16 x half> %res.0, ptr addrspace(1) %out.0, align 32
  store <16 x half> %res.1, ptr addrspace(1) %out.1, align 32
  ret void
}

; @llvm.amdgcn.wmma.bf16.16x16x16.bf16

define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_lo(<16 x i16> %A, <16 x i16> %B, <16 x i16> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_bf16_16x16x16_bf16_lo:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_bf16_16x16x16_bf16 v[16:23], v[0:7], v[8:15], v[16:23]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[24:25], v[20:23], off offset:16
; W32-NEXT:    global_store_b128 v[24:25], v[16:19], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16> %A, <16 x i16> %B, <16 x i16> %C, i1 0)
  store <16 x i16> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_hi(<16 x i16> %A, <16 x i16> %B, <16 x i16> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_bf16_16x16x16_bf16_hi:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_bf16_16x16x16_bf16 v[16:23], v[0:7], v[8:15], v[16:23] op_sel:[0,0,1]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[24:25], v[20:23], off offset:16
; W32-NEXT:    global_store_b128 v[24:25], v[16:19], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16> %A, <16 x i16> %B, <16 x i16> %C, i1 1)
  store <16 x i16> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_untied(<16 x i16> %A.0, <16 x i16> %B.0, <16 x i16> %A.1, <16 x i16> %B.1, <16 x i16> %C, ptr addrspace(1) %out.0, ptr addrspace(1) %out.1) {
; W32-LABEL: test_wmma_bf16_16x16x16_bf16_untied:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_bf16_16x16x16_bf16 v[44:51], v[0:7], v[8:15], v[32:39]
; W32-NEXT:    v_wmma_bf16_16x16x16_bf16 v[32:39], v[16:23], v[24:31], v[32:39]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[40:41], v[48:51], off offset:16
; W32-NEXT:    global_store_b128 v[40:41], v[44:47], off
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[42:43], v[36:39], off offset:16
; W32-NEXT:    global_store_b128 v[42:43], v[32:35], off
; W32-NEXT:    s_endpgm
bb:
  %res.0 = call <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16> %A.0, <16 x i16> %B.0, <16 x i16> %C, i1 0)
  %res.1 = call <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16> %A.1, <16 x i16> %B.1, <16 x i16> %C, i1 0)
  store <16 x i16> %res.0, ptr addrspace(1) %out.0, align 32
  store <16 x i16> %res.1, ptr addrspace(1) %out.1, align 32
  ret void
}

define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_tied(<16 x i16> %A.0, <16 x i16> %B.0, <16 x i16> %A.1, <16 x i16> %B.1, <16 x i16> %C, ptr addrspace(1) %out.0, ptr addrspace(1) %out.1) {
; W32-LABEL: test_wmma_bf16_16x16x16_bf16_tied:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_dual_mov_b32 v51, v39 :: v_dual_mov_b32 v50, v38
; W32-NEXT:    v_dual_mov_b32 v49, v37 :: v_dual_mov_b32 v48, v36
; W32-NEXT:    v_dual_mov_b32 v47, v35 :: v_dual_mov_b32 v46, v34
; W32-NEXT:    v_dual_mov_b32 v45, v33 :: v_dual_mov_b32 v44, v32
; W32-NEXT:    v_wmma_bf16_16x16x16_bf16 v[32:39], v[16:23], v[24:31], v[32:39]
; W32-NEXT:    s_delay_alu instid0(VALU_DEP_2)
; W32-NEXT:    v_wmma_bf16_16x16x16_bf16 v[44:51], v[0:7], v[8:15], v[44:51]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[40:41], v[48:51], off offset:16
; W32-NEXT:    global_store_b128 v[40:41], v[44:47], off
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[42:43], v[36:39], off offset:16
; W32-NEXT:    global_store_b128 v[42:43], v[32:35], off
; W32-NEXT:    s_endpgm
bb:
  %res.0 = call <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied(<16 x i16> %A.0, <16 x i16> %B.0, <16 x i16> %C, i1 0)
  %res.1 = call <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied(<16 x i16> %A.1, <16 x i16> %B.1, <16 x i16> %C, i1 0)
  store <16 x i16> %res.0, ptr addrspace(1) %out.0, align 32
  store <16 x i16> %res.1, ptr addrspace(1) %out.1, align 32
  ret void
}

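; For the iu8/iu4 intrinsics below, the first and third i1 immargs mark the A
; and B operands as signed, which llc encodes through the neg_lo modifier
; (neg_lo:[1,0,0], neg_lo:[0,1,0], or neg_lo:[1,1,0]).  The final i1 requests
; saturation of the accumulated result via the clamp modifier.
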
; @llvm.amdgcn.wmma.i32.16x16x16.iu8

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_unsigned_unsigned(<4 x i32> %A, <4 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui8_unsigned_unsigned:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], v[8:15]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[16:17], v[12:15], off offset:16
; W32-NEXT:    global_store_b128 v[16:17], v[8:11], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 0, <4 x i32> %A, i1 0, <4 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_unsigned_signed(<4 x i32> %A, <4 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui8_unsigned_signed:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], v[8:15] neg_lo:[0,1,0]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[16:17], v[12:15], off offset:16
; W32-NEXT:    global_store_b128 v[16:17], v[8:11], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 0, <4 x i32> %A, i1 1, <4 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_signed_unsigned(<4 x i32> %A, <4 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui8_signed_unsigned:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], v[8:15] neg_lo:[1,0,0]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[16:17], v[12:15], off offset:16
; W32-NEXT:    global_store_b128 v[16:17], v[8:11], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 1, <4 x i32> %A, i1 0, <4 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_signed_signed(<4 x i32> %A, <4 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui8_signed_signed:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], v[8:15] neg_lo:[1,1,0]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[16:17], v[12:15], off offset:16
; W32-NEXT:    global_store_b128 v[16:17], v[8:11], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 1, <4 x i32> %A, i1 1, <4 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_unsigned_unsigned_clamp(<4 x i32> %A, <4 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui8_unsigned_unsigned_clamp:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], v[8:15] clamp
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[16:17], v[12:15], off offset:16
; W32-NEXT:    global_store_b128 v[16:17], v[8:11], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 0, <4 x i32> %A, i1 0, <4 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_unsigned_signed_clamp(<4 x i32> %A, <4 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui8_unsigned_signed_clamp:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], v[8:15] neg_lo:[0,1,0] clamp
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[16:17], v[12:15], off offset:16
; W32-NEXT:    global_store_b128 v[16:17], v[8:11], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 0, <4 x i32> %A, i1 1, <4 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_signed_unsigned_clamp(<4 x i32> %A, <4 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui8_signed_unsigned_clamp:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], v[8:15] neg_lo:[1,0,0] clamp
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[16:17], v[12:15], off offset:16
; W32-NEXT:    global_store_b128 v[16:17], v[8:11], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 1, <4 x i32> %A, i1 0, <4 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_signed_signed_clamp(<4 x i32> %A, <4 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui8_signed_signed_clamp:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], v[8:15] neg_lo:[1,1,0] clamp
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[16:17], v[12:15], off offset:16
; W32-NEXT:    global_store_b128 v[16:17], v[8:11], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 1, <4 x i32> %A, i1 1, <4 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

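; The iu4 tests repeat the same signed/unsigned and clamp combinations with
; the 4-bit operand values packed into <2 x i32>.
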
; @llvm.amdgcn.wmma.i32.16x16x16.iu4

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_unsigned_unsigned(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui4_unsigned_unsigned:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu4 v[4:11], v[0:1], v[2:3], v[4:11]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; W32-NEXT:    global_store_b128 v[12:13], v[4:7], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 0, <2 x i32> %A, i1 0, <2 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_unsigned_signed(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui4_unsigned_signed:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu4 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[0,1,0]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; W32-NEXT:    global_store_b128 v[12:13], v[4:7], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 0, <2 x i32> %A, i1 1, <2 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_signed_unsigned(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui4_signed_unsigned:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu4 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[1,0,0]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; W32-NEXT:    global_store_b128 v[12:13], v[4:7], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 1, <2 x i32> %A, i1 0, <2 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_signed_signed(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui4_signed_signed:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu4 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[1,1,0]
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; W32-NEXT:    global_store_b128 v[12:13], v[4:7], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 1, <2 x i32> %A, i1 1, <2 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_unsigned_unsigned_clamp(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui4_unsigned_unsigned_clamp:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu4 v[4:11], v[0:1], v[2:3], v[4:11] clamp
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; W32-NEXT:    global_store_b128 v[12:13], v[4:7], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 0, <2 x i32> %A, i1 0, <2 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_unsigned_signed_clamp(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui4_unsigned_signed_clamp:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu4 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[0,1,0] clamp
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; W32-NEXT:    global_store_b128 v[12:13], v[4:7], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 0, <2 x i32> %A, i1 1, <2 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_signed_unsigned_clamp(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui4_signed_unsigned_clamp:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu4 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[1,0,0] clamp
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; W32-NEXT:    global_store_b128 v[12:13], v[4:7], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 1, <2 x i32> %A, i1 0, <2 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_signed_signed_clamp(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; W32-LABEL: test_wmma_i32_16x16x16_ui4_signed_signed_clamp:
; W32:       ; %bb.0: ; %bb
; W32-NEXT:    v_wmma_i32_16x16x16_iu4 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[1,1,0] clamp
; W32-NEXT:    s_clause 0x1
; W32-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; W32-NEXT:    global_store_b128 v[12:13], v[4:7], off
; W32-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 1, <2 x i32> %A, i1 1, <2 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out, align 32
  ret void
}