; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx512f -verify-machineinstrs | FileCheck %s
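
; Two tile dot products share one B tile. The CHECK lines expect a single
; ldtilecfg to configure all five tile registers and a single tileloadd of B
; (%tmm4) feeding both tdpbssd instructions.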

define dso_local void @test_chain(ptr %A_mem, ptr %B_mem, ptr %C_mem) {
; CHECK-LABEL: test_chain:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups %zmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $16, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $64, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $16, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $64, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $16, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $64, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $16, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $64, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $16, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $64, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    ldtilecfg -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movl $64, %eax
; CHECK-NEXT:    movw $64, %cx
; CHECK-NEXT:    movw $16, %r8w
; CHECK-NEXT:    tileloadd (%rdi,%rax), %tmm0
; CHECK-NEXT:    addq $1024, %rdi # imm = 0x400
; CHECK-NEXT:    tileloadd (%rdi,%rax), %tmm1
; CHECK-NEXT:    tileloadd (%rdx,%rax), %tmm3
; CHECK-NEXT:    leaq 1024(%rdx), %rdi
; CHECK-NEXT:    tileloadd (%rdi,%rax), %tmm2
; CHECK-NEXT:    tileloadd (%rsi,%rax), %tmm4
; CHECK-NEXT:    tdpbssd %tmm4, %tmm0, %tmm3
; CHECK-NEXT:    tilestored %tmm3, (%rdx,%rax)
; CHECK-NEXT:    tdpbssd %tmm4, %tmm1, %tmm2
; CHECK-NEXT:    tilestored %tmm2, (%rdi,%rax)
; CHECK-NEXT:    tilerelease
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
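  ; Load two 16x64 tiles of A: one at %A_mem and one 1024 bytes past it.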
  %a1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %A_mem, i64 64)
  %addr = getelementptr inbounds i8, ptr %A_mem, i64 1024
  %a2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %addr, i64 64)
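  ; Load the two destination C tiles the same way.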
  %c1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %C_mem, i64 64)
  %caddr = getelementptr inbounds i8, ptr %C_mem, i64 1024
  %c2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %caddr, i64 64)
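  ; B is loaded only once and is shared by both dot products below.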
  %b = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %B_mem, i64 64)
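  ; Accumulate each C tile with the signed-byte dot product of its A tile and B,
  ; then store the result back to the memory it was loaded from.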
  %dp1 = call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %c1, x86_amx %a1, x86_amx %b)
  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr nonnull %C_mem, i64 64, x86_amx %dp1)
  %dp2 = call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %c2, x86_amx %a2, x86_amx %b)
  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr nonnull %caddr, i64 64, x86_amx %dp2)
  ret void
}

declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)