; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=W64
declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16(<16 x half>, <16 x half>, <4 x float>)
declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16(<16 x i16>, <16 x i16>, <4 x float>)
declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half>, <16 x half>, <8 x half>, i1 immarg)
declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.tied(<16 x half>, <16 x half>, <8 x half>, i1 immarg)
declare <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16>, <16 x i16>, <8 x i16>, i1 immarg)
declare <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied(<16 x i16>, <16 x i16>, <8 x i16>, i1 immarg)
declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 immarg, <4 x i32>, i1 immarg, <4 x i32>, <4 x i32>, i1 immarg)
declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <4 x i32>, i1 immarg)
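
; Meaning of the immarg operands, as exercised by the tests below: for the
; f16/bf16 accumulator intrinsics the trailing i1 selects whether the packed
; 16-bit results go to the low (0) or high (1) half of each 32-bit lane
; register; for the iu8/iu4 intrinsics the i1 preceding each of A and B marks
; that operand as signed, and the trailing i1 requests clamped (saturating)
; accumulation.
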
; @llvm.amdgcn.wmma.f32.16x16x16.f16
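; D.f32 = A.f16 x B.f16 + C.f32 for a 16x16x16 tile. In wave64 each lane holds
; four accumulator elements, so C/D are <4 x float>; the result is produced in
; the accumulator registers (v[16:19]) and then stored.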
define amdgpu_ps void @test_wmma_f32_16x16x16_f16(<16 x half> %A, <16 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_f32_16x16x16_f16:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_f32_16x16x16_f16 v[16:19], v[0:7], v[8:15], v[16:19]
; W64-NEXT: global_store_b128 v[20:21], v[16:19], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16(<16 x half> %A, <16 x half> %B, <4 x float> %C)
  store <4 x float> %res, ptr addrspace(1) %out, align 16
  ret void
}

; @llvm.amdgcn.wmma.f32.16x16x16.bf16
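; Same as the f16 case above, but A and B are bf16 values passed as <16 x i16>.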
define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<16 x i16> %A, <16 x i16> %B, <4 x float> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_f32_16x16x16_bf16:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_f32_16x16x16_bf16 v[16:19], v[0:7], v[8:15], v[16:19]
; W64-NEXT: global_store_b128 v[20:21], v[16:19], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16(<16 x i16> %A, <16 x i16> %B, <4 x float> %C)
  store <4 x float> %res, ptr addrspace(1) %out, align 16
  ret void
}

; @llvm.amdgcn.wmma.f16.16x16x16.f16
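; f16 accumulator/result, packed two elements per 32-bit lane register. The
; _lo/_hi tests cover the trailing i1 immarg: with 1 the results are written to
; the high halves, selected via op_sel:[0,0,1].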
define amdgpu_ps void @test_wmma_f16_16x16x16_f16_lo(<16 x half> %A, <16 x half> %B, <8 x half> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_f16_16x16x16_f16_lo:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_f16_16x16x16_f16 v[16:19], v[0:7], v[8:15], v[16:19]
; W64-NEXT: global_store_b128 v[20:21], v[16:19], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %A, <16 x half> %B, <8 x half> %C, i1 0)
  store <8 x half> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_hi(<16 x half> %A, <16 x half> %B, <8 x half> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_f16_16x16x16_f16_hi:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_f16_16x16x16_f16 v[16:19], v[0:7], v[8:15], v[16:19] op_sel:[0,0,1]
; W64-NEXT: global_store_b128 v[20:21], v[16:19], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %A, <16 x half> %B, <8 x half> %C, i1 1)
  store <8 x half> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_untied(<16 x half> %A.0, <16 x half> %B.0, <16 x half> %A.1, <16 x half> %B.1, <8 x half> %C, ptr addrspace(1) %out.0, ptr addrspace(1) %out.1) {
; W64-LABEL: test_wmma_f16_16x16x16_f16_untied:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_f16_16x16x16_f16 v[40:43], v[0:7], v[8:15], v[32:35]
; W64-NEXT: v_wmma_f16_16x16x16_f16 v[32:35], v[16:23], v[24:31], v[32:35]
; W64-NEXT: global_store_b128 v[36:37], v[40:43], off
; W64-NEXT: global_store_b128 v[38:39], v[32:35], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res.0 = call <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %A.0, <16 x half> %B.0, <8 x half> %C, i1 0)
  %res.1 = call <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %A.1, <16 x half> %B.1, <8 x half> %C, i1 0)
  store <8 x half> %res.0, ptr addrspace(1) %out.0, align 32
  store <8 x half> %res.1, ptr addrspace(1) %out.1, align 32
  ret void
}

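; The .tied variant keeps the accumulator operand tied to the destination
; registers, so reusing %C for both WMMAs forces the backend to copy the
; accumulator (the v_mov_b32 sequence below) rather than picking a fresh
; destination as in the untied test above.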
define amdgpu_ps void @test_wmma_f16_16x16x16_f16_tied(<16 x half> %A.0, <16 x half> %B.0, <16 x half> %A.1, <16 x half> %B.1, <8 x half> %C, ptr addrspace(1) %out.0, ptr addrspace(1) %out.1) {
; W64-LABEL: test_wmma_f16_16x16x16_f16_tied:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_mov_b32_e32 v43, v35
; W64-NEXT: v_mov_b32_e32 v42, v34
; W64-NEXT: v_mov_b32_e32 v41, v33
; W64-NEXT: v_mov_b32_e32 v40, v32
; W64-NEXT: v_wmma_f16_16x16x16_f16 v[32:35], v[16:23], v[24:31], v[32:35]
; W64-NEXT: s_delay_alu instid0(VALU_DEP_2)
; W64-NEXT: v_wmma_f16_16x16x16_f16 v[40:43], v[0:7], v[8:15], v[40:43]
; W64-NEXT: global_store_b128 v[36:37], v[40:43], off
; W64-NEXT: global_store_b128 v[38:39], v[32:35], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res.0 = call <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.tied(<16 x half> %A.0, <16 x half> %B.0, <8 x half> %C, i1 0)
  %res.1 = call <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.tied(<16 x half> %A.1, <16 x half> %B.1, <8 x half> %C, i1 0)
  store <8 x half> %res.0, ptr addrspace(1) %out.0, align 32
  store <8 x half> %res.1, ptr addrspace(1) %out.1, align 32
  ret void
}

; @llvm.amdgcn.wmma.bf16.16x16x16.bf16
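; bf16 accumulator/result passed as <8 x i16>; otherwise these mirror the f16
; tests above, including the op_sel:[0,0,1] high-half case and the tied form.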
define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_lo(<16 x i16> %A, <16 x i16> %B, <8 x i16> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_bf16_16x16x16_bf16_lo:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_bf16_16x16x16_bf16 v[16:19], v[0:7], v[8:15], v[16:19]
; W64-NEXT: global_store_b128 v[20:21], v[16:19], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16> %A, <16 x i16> %B, <8 x i16> %C, i1 0)
  store <8 x i16> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_hi(<16 x i16> %A, <16 x i16> %B, <8 x i16> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_bf16_16x16x16_bf16_hi:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_bf16_16x16x16_bf16 v[16:19], v[0:7], v[8:15], v[16:19] op_sel:[0,0,1]
; W64-NEXT: global_store_b128 v[20:21], v[16:19], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16> %A, <16 x i16> %B, <8 x i16> %C, i1 1)
  store <8 x i16> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_untied(<16 x i16> %A.0, <16 x i16> %B.0, <16 x i16> %A.1, <16 x i16> %B.1, <8 x i16> %C, ptr addrspace(1) %out.0, ptr addrspace(1) %out.1) {
; W64-LABEL: test_wmma_bf16_16x16x16_bf16_untied:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_bf16_16x16x16_bf16 v[40:43], v[0:7], v[8:15], v[32:35]
; W64-NEXT: v_wmma_bf16_16x16x16_bf16 v[32:35], v[16:23], v[24:31], v[32:35]
; W64-NEXT: global_store_b128 v[36:37], v[40:43], off
; W64-NEXT: global_store_b128 v[38:39], v[32:35], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res.0 = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16> %A.0, <16 x i16> %B.0, <8 x i16> %C, i1 0)
  %res.1 = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16(<16 x i16> %A.1, <16 x i16> %B.1, <8 x i16> %C, i1 0)
  store <8 x i16> %res.0, ptr addrspace(1) %out.0, align 32
  store <8 x i16> %res.1, ptr addrspace(1) %out.1, align 32
  ret void
}

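; As with f16, the .tied variant forces the reused accumulator to be copied
; before the second WMMA.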
define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_tied(<16 x i16> %A.0, <16 x i16> %B.0, <16 x i16> %A.1, <16 x i16> %B.1, <8 x i16> %C, ptr addrspace(1) %out.0, ptr addrspace(1) %out.1) {
; W64-LABEL: test_wmma_bf16_16x16x16_bf16_tied:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_mov_b32_e32 v43, v35
; W64-NEXT: v_mov_b32_e32 v42, v34
; W64-NEXT: v_mov_b32_e32 v41, v33
; W64-NEXT: v_mov_b32_e32 v40, v32
; W64-NEXT: v_wmma_bf16_16x16x16_bf16 v[32:35], v[16:23], v[24:31], v[32:35]
; W64-NEXT: s_delay_alu instid0(VALU_DEP_2)
; W64-NEXT: v_wmma_bf16_16x16x16_bf16 v[40:43], v[0:7], v[8:15], v[40:43]
; W64-NEXT: global_store_b128 v[36:37], v[40:43], off
; W64-NEXT: global_store_b128 v[38:39], v[32:35], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res.0 = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied(<16 x i16> %A.0, <16 x i16> %B.0, <8 x i16> %C, i1 0)
  %res.1 = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied(<16 x i16> %A.1, <16 x i16> %B.1, <8 x i16> %C, i1 0)
  store <8 x i16> %res.0, ptr addrspace(1) %out.0, align 32
  store <8 x i16> %res.1, ptr addrspace(1) %out.1, align 32
  ret void
}

; @llvm.amdgcn.wmma.i32.16x16x16.iu8
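; 8-bit integer WMMA with i32 accumulation. The i1 signedness flags for A and B
; map to the first two neg_lo bits, and the trailing i1 maps to the clamp
; modifier.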
define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_unsigned_unsigned(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui8_unsigned_unsigned:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu8 v[8:11], v[0:3], v[4:7], v[8:11]
; W64-NEXT: global_store_b128 v[12:13], v[8:11], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 0, <4 x i32> %A, i1 0, <4 x i32> %B, <4 x i32> %C, i1 0)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_unsigned_signed(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui8_unsigned_signed:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu8 v[8:11], v[0:3], v[4:7], v[8:11] neg_lo:[0,1,0]
; W64-NEXT: global_store_b128 v[12:13], v[8:11], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 0, <4 x i32> %A, i1 1, <4 x i32> %B, <4 x i32> %C, i1 0)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_signed_unsigned(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui8_signed_unsigned:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu8 v[8:11], v[0:3], v[4:7], v[8:11] neg_lo:[1,0,0]
; W64-NEXT: global_store_b128 v[12:13], v[8:11], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 1, <4 x i32> %A, i1 0, <4 x i32> %B, <4 x i32> %C, i1 0)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_signed_signed(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui8_signed_signed:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu8 v[8:11], v[0:3], v[4:7], v[8:11] neg_lo:[1,1,0]
; W64-NEXT: global_store_b128 v[12:13], v[8:11], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 1, <4 x i32> %A, i1 1, <4 x i32> %B, <4 x i32> %C, i1 0)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_unsigned_unsigned_clamp(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui8_unsigned_unsigned_clamp:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu8 v[8:11], v[0:3], v[4:7], v[8:11] clamp
; W64-NEXT: global_store_b128 v[12:13], v[8:11], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 0, <4 x i32> %A, i1 0, <4 x i32> %B, <4 x i32> %C, i1 1)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_unsigned_signed_clamp(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui8_unsigned_signed_clamp:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu8 v[8:11], v[0:3], v[4:7], v[8:11] neg_lo:[0,1,0] clamp
; W64-NEXT: global_store_b128 v[12:13], v[8:11], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 0, <4 x i32> %A, i1 1, <4 x i32> %B, <4 x i32> %C, i1 1)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_signed_unsigned_clamp(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui8_signed_unsigned_clamp:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu8 v[8:11], v[0:3], v[4:7], v[8:11] neg_lo:[1,0,0] clamp
; W64-NEXT: global_store_b128 v[12:13], v[8:11], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 1, <4 x i32> %A, i1 0, <4 x i32> %B, <4 x i32> %C, i1 1)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui8_signed_signed_clamp(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui8_signed_signed_clamp:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu8 v[8:11], v[0:3], v[4:7], v[8:11] neg_lo:[1,1,0] clamp
; W64-NEXT: global_store_b128 v[12:13], v[8:11], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8(i1 1, <4 x i32> %A, i1 1, <4 x i32> %B, <4 x i32> %C, i1 1)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

; @llvm.amdgcn.wmma.i32.16x16x16.iu4
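; 4-bit integer WMMA: same flag handling as the iu8 tests above, with A and B
; packed into <2 x i32>.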
define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_unsigned_unsigned(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui4_unsigned_unsigned:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu4 v[4:7], v[0:1], v[2:3], v[4:7]
; W64-NEXT: global_store_b128 v[8:9], v[4:7], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 0, <2 x i32> %A, i1 0, <2 x i32> %B, <4 x i32> %C, i1 0)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_unsigned_signed(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui4_unsigned_signed:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu4 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,1,0]
; W64-NEXT: global_store_b128 v[8:9], v[4:7], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 0, <2 x i32> %A, i1 1, <2 x i32> %B, <4 x i32> %C, i1 0)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_signed_unsigned(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui4_signed_unsigned:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu4 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[1,0,0]
; W64-NEXT: global_store_b128 v[8:9], v[4:7], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 1, <2 x i32> %A, i1 0, <2 x i32> %B, <4 x i32> %C, i1 0)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_signed_signed(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui4_signed_signed:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu4 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[1,1,0]
; W64-NEXT: global_store_b128 v[8:9], v[4:7], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 1, <2 x i32> %A, i1 1, <2 x i32> %B, <4 x i32> %C, i1 0)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_unsigned_unsigned_clamp(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui4_unsigned_unsigned_clamp:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu4 v[4:7], v[0:1], v[2:3], v[4:7] clamp
; W64-NEXT: global_store_b128 v[8:9], v[4:7], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 0, <2 x i32> %A, i1 0, <2 x i32> %B, <4 x i32> %C, i1 1)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_unsigned_signed_clamp(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui4_unsigned_signed_clamp:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu4 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,1,0] clamp
; W64-NEXT: global_store_b128 v[8:9], v[4:7], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 0, <2 x i32> %A, i1 1, <2 x i32> %B, <4 x i32> %C, i1 1)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_signed_unsigned_clamp(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui4_signed_unsigned_clamp:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu4 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[1,0,0] clamp
; W64-NEXT: global_store_b128 v[8:9], v[4:7], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 1, <2 x i32> %A, i1 0, <2 x i32> %B, <4 x i32> %C, i1 1)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}

define amdgpu_ps void @test_wmma_i32_16x16x16_ui4_signed_signed_clamp(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C, ptr addrspace(1) %out) {
; W64-LABEL: test_wmma_i32_16x16x16_ui4_signed_signed_clamp:
; W64: ; %bb.0: ; %bb
; W64-NEXT: v_wmma_i32_16x16x16_iu4 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[1,1,0] clamp
; W64-NEXT: global_store_b128 v[8:9], v[4:7], off
; W64-NEXT: s_nop 0
; W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; W64-NEXT: s_endpgm
bb:
  %res = call <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4(i1 1, <2 x i32> %A, i1 1, <2 x i32> %B, <4 x i32> %C, i1 1)
  store <4 x i32> %res, ptr addrspace(1) %out, align 16
  ret void
}