1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=GFX12
4 define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negA(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
5 ; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negA:
6 ; GFX12: ; %bb.0: ; %bb
7 ; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[1,0,0] neg_hi:[1,0,0]
8 ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
10 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
11 ; GFX12-NEXT: s_endpgm
13 %fneg.A = fneg <4 x half> %A
14 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %fneg.A, <4 x half> %B, <4 x float> %C)
15 store <4 x float> %res, ptr addrspace(1) %out
19 define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negB(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
20 ; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negB:
21 ; GFX12: ; %bb.0: ; %bb
22 ; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,1,0] neg_hi:[0,1,0]
23 ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
25 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
26 ; GFX12-NEXT: s_endpgm
28 %fneg.B = fneg <4 x half> %B
29 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %fneg.B, <4 x float> %C)
30 store <4 x float> %res, ptr addrspace(1) %out
34 define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negC(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
35 ; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negC:
36 ; GFX12: ; %bb.0: ; %bb
37 ; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1]
38 ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
40 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
41 ; GFX12-NEXT: s_endpgm
43 %fneg.C = fneg <4 x float> %C
44 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %B, <4 x float> %fneg.C)
45 store <4 x float> %res, ptr addrspace(1) %out
49 define amdgpu_ps void @test_wmma_f32_16x16x16_f16_absC(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
50 ; GFX12-LABEL: test_wmma_f32_16x16x16_f16_absC:
51 ; GFX12: ; %bb.0: ; %bb
52 ; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_hi:[0,0,1]
53 ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
55 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
56 ; GFX12-NEXT: s_endpgm
58 %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
59 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %B, <4 x float> %fabs.C)
60 store <4 x float> %res, ptr addrspace(1) %out
64 define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, ptr addrspace(1) %out) {
65 ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_negC:
66 ; GFX12: ; %bb.0: ; %bb
67 ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1]
68 ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
70 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
71 ; GFX12-NEXT: s_endpgm
73 %fneg.C = fneg <4 x float> %C
74 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %fneg.C)
75 store <4 x float> %res, ptr addrspace(1) %out
79 define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, ptr addrspace(1) %out) {
80 ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_absC:
81 ; GFX12: ; %bb.0: ; %bb
82 ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] neg_hi:[0,0,1]
83 ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
85 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
86 ; GFX12-NEXT: s_endpgm
88 %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
89 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %fabs.C)
90 store <4 x float> %res, ptr addrspace(1) %out
94 define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negA(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
95 ; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negA:
96 ; GFX12: ; %bb.0: ; %bb
97 ; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[1,0,0] neg_hi:[1,0,0]
98 ; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
100 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
101 ; GFX12-NEXT: s_endpgm
103 %fneg.A = fneg <4 x half> %A
104 %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %fneg.A, <4 x half> %B, <4 x half> %C, i1 0)
105 store <4 x half> %res, ptr addrspace(1) %out
109 define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negB(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
110 ; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negB:
111 ; GFX12: ; %bb.0: ; %bb
112 ; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[0,1,0] neg_hi:[0,1,0]
113 ; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
114 ; GFX12-NEXT: s_nop 0
115 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
116 ; GFX12-NEXT: s_endpgm
118 %fneg.B = fneg <4 x half> %B
119 %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %fneg.B, <4 x half> %C, i1 0)
120 store <4 x half> %res, ptr addrspace(1) %out
124 define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negC(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
125 ; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negC:
126 ; GFX12: ; %bb.0: ; %bb
127 ; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[0,0,1]
128 ; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
129 ; GFX12-NEXT: s_nop 0
130 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
131 ; GFX12-NEXT: s_endpgm
133 %fneg.C = fneg <4 x half> %C
134 %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %B, <4 x half> %fneg.C, i1 0)
135 store <4 x half> %res, ptr addrspace(1) %out
139 define amdgpu_ps void @test_wmma_f16_16x16x16_f16_absC(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
140 ; GFX12-LABEL: test_wmma_f16_16x16x16_f16_absC:
141 ; GFX12: ; %bb.0: ; %bb
142 ; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_hi:[0,0,1]
143 ; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
144 ; GFX12-NEXT: s_nop 0
145 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
146 ; GFX12-NEXT: s_endpgm
148 %fabs.C = call <4 x half> @llvm.fabs.v4f16(<4 x half> %C)
149 %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %B, <4 x half> %fabs.C, i1 0)
150 store <4 x half> %res, ptr addrspace(1) %out
154 define amdgpu_ps void @test_wmma_f32_16x16x16_fp8_fp8_negC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
155 ; GFX12-LABEL: test_wmma_f32_16x16x16_fp8_fp8_negC:
156 ; GFX12: ; %bb.0: ; %bb
157 ; GFX12-NEXT: v_wmma_f32_16x16x16_fp8_fp8 v[2:5], v0, v1, v[2:5] neg_lo:[0,0,1]
158 ; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
159 ; GFX12-NEXT: s_nop 0
160 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
161 ; GFX12-NEXT: s_endpgm
163 %fneg.C = fneg <4 x float> %C
164 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fneg.C)
165 store <4 x float> %res, ptr addrspace(1) %out
169 define amdgpu_ps void @test_wmma_f32_16x16x16_fp8_fp8_absC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
170 ; GFX12-LABEL: test_wmma_f32_16x16x16_fp8_fp8_absC:
171 ; GFX12: ; %bb.0: ; %bb
172 ; GFX12-NEXT: v_wmma_f32_16x16x16_fp8_fp8 v[2:5], v0, v1, v[2:5] neg_hi:[0,0,1]
173 ; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
174 ; GFX12-NEXT: s_nop 0
175 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
176 ; GFX12-NEXT: s_endpgm
178 %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
179 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fabs.C)
180 store <4 x float> %res, ptr addrspace(1) %out
184 define amdgpu_ps void @test_wmma_f32_16x16x16_bf8_fp8_negC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
185 ; GFX12-LABEL: test_wmma_f32_16x16x16_bf8_fp8_negC:
186 ; GFX12: ; %bb.0: ; %bb
187 ; GFX12-NEXT: v_wmma_f32_16x16x16_bf8_fp8 v[2:5], v0, v1, v[2:5] neg_lo:[0,0,1]
188 ; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
189 ; GFX12-NEXT: s_nop 0
190 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
191 ; GFX12-NEXT: s_endpgm
193 %fneg.C = fneg <4 x float> %C
194 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fneg.C)
195 store <4 x float> %res, ptr addrspace(1) %out
199 define amdgpu_ps void @test_wmma_f32_16x16x16_bf8_fp8_absC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
200 ; GFX12-LABEL: test_wmma_f32_16x16x16_bf8_fp8_absC:
201 ; GFX12: ; %bb.0: ; %bb
202 ; GFX12-NEXT: v_wmma_f32_16x16x16_bf8_fp8 v[2:5], v0, v1, v[2:5] neg_hi:[0,0,1]
203 ; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
204 ; GFX12-NEXT: s_nop 0
205 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
206 ; GFX12-NEXT: s_endpgm
208 %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
209 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fabs.C)
210 store <4 x float> %res, ptr addrspace(1) %out
214 define amdgpu_ps void @test_wmma_f32_16x16x16_fp8_bf8_negC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
215 ; GFX12-LABEL: test_wmma_f32_16x16x16_fp8_bf8_negC:
216 ; GFX12: ; %bb.0: ; %bb
217 ; GFX12-NEXT: v_wmma_f32_16x16x16_fp8_bf8 v[2:5], v0, v1, v[2:5] neg_lo:[0,0,1]
218 ; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
219 ; GFX12-NEXT: s_nop 0
220 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
221 ; GFX12-NEXT: s_endpgm
223 %fneg.C = fneg <4 x float> %C
224 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fneg.C)
225 store <4 x float> %res, ptr addrspace(1) %out
229 define amdgpu_ps void @test_wmma_f32_16x16x16_fp8_bf8_absC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
230 ; GFX12-LABEL: test_wmma_f32_16x16x16_fp8_bf8_absC:
231 ; GFX12: ; %bb.0: ; %bb
232 ; GFX12-NEXT: v_wmma_f32_16x16x16_fp8_bf8 v[2:5], v0, v1, v[2:5] neg_hi:[0,0,1]
233 ; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
234 ; GFX12-NEXT: s_nop 0
235 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
236 ; GFX12-NEXT: s_endpgm
238 %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
239 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fabs.C)
240 store <4 x float> %res, ptr addrspace(1) %out
244 define amdgpu_ps void @test_wmma_f32_16x16x16_bf8_bf8_negC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
245 ; GFX12-LABEL: test_wmma_f32_16x16x16_bf8_bf8_negC:
246 ; GFX12: ; %bb.0: ; %bb
247 ; GFX12-NEXT: v_wmma_f32_16x16x16_bf8_bf8 v[2:5], v0, v1, v[2:5] neg_lo:[0,0,1]
248 ; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
249 ; GFX12-NEXT: s_nop 0
250 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
251 ; GFX12-NEXT: s_endpgm
253 %fneg.C = fneg <4 x float> %C
254 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fneg.C)
255 store <4 x float> %res, ptr addrspace(1) %out
259 define amdgpu_ps void @test_wmma_f32_16x16x16_bf8_bf8_absC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
260 ; GFX12-LABEL: test_wmma_f32_16x16x16_bf8_bf8_absC:
261 ; GFX12: ; %bb.0: ; %bb
262 ; GFX12-NEXT: v_wmma_f32_16x16x16_bf8_bf8 v[2:5], v0, v1, v[2:5] neg_hi:[0,0,1]
263 ; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
264 ; GFX12-NEXT: s_nop 0
265 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
266 ; GFX12-NEXT: s_endpgm
268 %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
269 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fabs.C)
270 store <4 x float> %res, ptr addrspace(1) %out
274 define amdgpu_ps void @test_swmmac_f32_16x16x32_f16_negA(<4 x half> %A, <8 x half> %B, <4 x float> %C, i16 %Index, ptr addrspace(1) %out) {
275 ; GFX12-LABEL: test_swmmac_f32_16x16x32_f16_negA:
276 ; GFX12: ; %bb.0: ; %bb
277 ; GFX12-NEXT: v_swmmac_f32_16x16x32_f16 v[6:9], v[0:1], v[2:5], v10 neg_lo:[1,0,0] neg_hi:[1,0,0]
278 ; GFX12-NEXT: global_store_b128 v[11:12], v[6:9], off
279 ; GFX12-NEXT: s_nop 0
280 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
281 ; GFX12-NEXT: s_endpgm
283 %fneg.A = fneg <4 x half> %A
284 %res = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f16.v8f16.v4f32.i16(<4 x half> %fneg.A, <8 x half> %B, <4 x float> %C, i16 %Index)
285 store <4 x float> %res, ptr addrspace(1) %out
289 define amdgpu_ps void @test_swmmac_f32_16x16x32_f16_negB(<4 x half> %A, <8 x half> %B, <4 x float> %C, i16 %Index, ptr addrspace(1) %out) {
290 ; GFX12-LABEL: test_swmmac_f32_16x16x32_f16_negB:
291 ; GFX12: ; %bb.0: ; %bb
292 ; GFX12-NEXT: v_swmmac_f32_16x16x32_f16 v[6:9], v[0:1], v[2:5], v10 neg_lo:[0,1,0] neg_hi:[0,1,0]
293 ; GFX12-NEXT: global_store_b128 v[11:12], v[6:9], off
294 ; GFX12-NEXT: s_nop 0
295 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
296 ; GFX12-NEXT: s_endpgm
298 %fneg.B = fneg <8 x half> %B
299 %res = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f16.v8f16.v4f32.i16(<4 x half> %A, <8 x half> %fneg.B, <4 x float> %C, i16 %Index)
300 store <4 x float> %res, ptr addrspace(1) %out
304 define amdgpu_ps void @test_swmmac_f16_16x16x32_f16_negA(<4 x half> %A, <8 x half> %B, <4 x half> %C, i16 %Index, ptr addrspace(1) %out) {
305 ; GFX12-LABEL: test_swmmac_f16_16x16x32_f16_negA:
306 ; GFX12: ; %bb.0: ; %bb
307 ; GFX12-NEXT: v_swmmac_f16_16x16x32_f16 v[6:7], v[0:1], v[2:5], v8 neg_lo:[1,0,0] neg_hi:[1,0,0]
308 ; GFX12-NEXT: global_store_b64 v[9:10], v[6:7], off
309 ; GFX12-NEXT: s_nop 0
310 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
311 ; GFX12-NEXT: s_endpgm
313 %fneg.A = fneg <4 x half> %A
314 %res = call <4 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v8f16.v4f16.i16(<4 x half> %fneg.A, <8 x half> %B, <4 x half> %C, i16 %Index)
315 store <4 x half> %res, ptr addrspace(1) %out
319 define amdgpu_ps void @test_swmmac_f16_16x16x32_f16_negB(<4 x half> %A, <8 x half> %B, <4 x half> %C, i16 %Index, ptr addrspace(1) %out) {
320 ; GFX12-LABEL: test_swmmac_f16_16x16x32_f16_negB:
321 ; GFX12: ; %bb.0: ; %bb
322 ; GFX12-NEXT: v_swmmac_f16_16x16x32_f16 v[6:7], v[0:1], v[2:5], v8 neg_lo:[0,1,0] neg_hi:[0,1,0]
323 ; GFX12-NEXT: global_store_b64 v[9:10], v[6:7], off
324 ; GFX12-NEXT: s_nop 0
325 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
326 ; GFX12-NEXT: s_endpgm
328 %fneg.B = fneg <8 x half> %B
329 %res = call <4 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v8f16.v4f16.i16(<4 x half> %A, <8 x half> %fneg.B, <4 x half> %C, i16 %Index)
330 store <4 x half> %res, ptr addrspace(1) %out
334 ; both neg and abs patterns (wmma matrix C f32 or f16)
336 define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negabsC(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
337 ; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negabsC:
338 ; GFX12: ; %bb.0: ; %bb
339 ; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1] neg_hi:[0,0,1]
340 ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
341 ; GFX12-NEXT: s_nop 0
342 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
343 ; GFX12-NEXT: s_endpgm
345 %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
346 %fneg.fabs.C = fneg <4 x float> %fabs.C
347 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %B, <4 x float> %fneg.fabs.C)
348 store <4 x float> %res, ptr addrspace(1) %out
352 define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negabsC(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
353 ; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negabsC:
354 ; GFX12: ; %bb.0: ; %bb
355 ; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[0,0,1] neg_hi:[0,0,1]
356 ; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
357 ; GFX12-NEXT: s_nop 0
358 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
359 ; GFX12-NEXT: s_endpgm
361 %fabs.C = call <4 x half> @llvm.fabs.v4f16(<4 x half> %C)
362 %fneg.fabs.C = fneg <4 x half> %fabs.C
363 %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %B, <4 x half> %fneg.fabs.C, i1 0)
364 store <4 x half> %res, ptr addrspace(1) %out
368 define amdgpu_ps void @test_wmma_f32_16x16x16_f16_neg_partial_fabsA(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
369 ; GFX12-LABEL: test_wmma_f32_16x16x16_f16_neg_partial_fabsA:
370 ; GFX12: ; %bb.0: ; %bb
371 ; GFX12-NEXT: v_and_b32_e32 v7, 0x7fffffff, v7
372 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
373 ; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1]
374 ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
375 ; GFX12-NEXT: s_nop 0
376 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
377 ; GFX12-NEXT: s_endpgm
379 %el3 = extractelement <4 x float> %C, i32 3
380 %el3.fabs = call float @llvm.fabs.f32(float %el3)
381 %partial.fabs.C = insertelement <4 x float> %C, float %el3.fabs, i32 3
382 %fneg.partial.fabs.C = fneg <4 x float> %partial.fabs.C
383 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %B, <4 x float> %fneg.partial.fabs.C)
384 store <4 x float> %res, ptr addrspace(1) %out
388 ; A or B matrix modifier and constant in C
390 define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negA_constantC(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
391 ; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negA_constantC:
392 ; GFX12: ; %bb.0: ; %bb
393 ; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[6:9], v[0:1], v[2:3], 1.0 neg_lo:[1,0,0] neg_hi:[1,0,0]
394 ; GFX12-NEXT: global_store_b128 v[4:5], v[6:9], off
395 ; GFX12-NEXT: s_nop 0
396 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
397 ; GFX12-NEXT: s_endpgm
399 %fneg.A = fneg <4 x half> %A
400 %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %fneg.A, <4 x half> %B, <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>)
401 store <4 x float> %res, ptr addrspace(1) %out
405 define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negB_constantC(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
406 ; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negB_constantC:
407 ; GFX12: ; %bb.0: ; %bb
408 ; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[6:7], v[0:1], v[2:3], 1.0 neg_lo:[0,1,0] neg_hi:[0,1,0]
409 ; GFX12-NEXT: global_store_b64 v[4:5], v[6:7], off
410 ; GFX12-NEXT: s_nop 0
411 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
412 ; GFX12-NEXT: s_endpgm
414 %fneg.B = fneg <4 x half> %B
415 %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %fneg.B, <4 x half> <half 1.0, half 1.0, half 1.0, half 1.0>, i1 0)
416 store <4 x half> %res, ptr addrspace(1) %out
420 ; pack f16 elements with v_perm_b32 since they don't come from same b32
422 define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negC_pack(<4 x half> %A, <4 x half> %B, ptr %Caddr, ptr addrspace(1) %out) {
423 ; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negC_pack:
424 ; GFX12: ; %bb.0: ; %bb
425 ; GFX12-NEXT: flat_load_b128 v[8:11], v[4:5]
426 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
427 ; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v8
428 ; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v10
429 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
430 ; GFX12-NEXT: v_lshl_or_b32 v4, v9, 16, v4
431 ; GFX12-NEXT: v_lshl_or_b32 v5, v11, 16, v5
432 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
433 ; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[0,0,1]
434 ; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
435 ; GFX12-NEXT: s_nop 0
436 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
437 ; GFX12-NEXT: s_endpgm
439 %C = load <8 x half>, ptr %Caddr
440 %C_shuffle = shufflevector <8 x half> %C, <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
441 %fneg.C_shuffle = fneg <4 x half> %C_shuffle
442 %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %B, <4 x half> %fneg.C_shuffle , i1 0)
443 store <4 x half> %res, ptr addrspace(1) %out
447 declare <4 x half> @llvm.fabs.v4f16(<4 x half>)
448 declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
449 declare float @llvm.fabs.f32(float)
451 declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half>, <4 x half>, <4 x float>)
452 declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16>, <4 x i16>, <4 x float>)
453 declare <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half>, <4 x half>, <4 x half>, i1 immarg)
454 declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.i32.v4f32(i32, i32, <4 x float>)
455 declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.i32.v4f32(i32, i32, <4 x float>)
456 declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.i32.v4f32(i32, i32, <4 x float>)
457 declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.i32.v4f32(i32, i32, <4 x float>)
458 declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f16.v8f16.v4f32.i16(<4 x half>, <8 x half>, <4 x float>, i16)
459 declare <4 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v8f16.v4f16.i16(<4 x half>, <8 x half>, <4 x half>, i16)