; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
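
; x86_amx values may only be produced and consumed by the AMX intrinsics, so
; InstCombine must keep the vector <-> x86_amx bitcasts below explicit rather
; than folding them into pointer casts that would create plain loads/stores of
; x86_amx.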
; Prohibit pointer cast for amx.
define dso_local void @test_amx_load_store(<256 x i32>* %src, i8* %dst) {
; CHECK-LABEL: @test_amx_load_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC:%.*]] = load <256 x i32>, <256 x i32>* [[SRC:%.*]], align 64
; CHECK-NEXT:    [[BC:%.*]] = bitcast <256 x i32> [[VEC]] to x86_amx
; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 16, i16 16, i8* [[DST:%.*]], i64 64, x86_amx [[BC]])
; CHECK-NEXT:    ret void
;
entry:
  %vec = load <256 x i32>, <256 x i32>* %src, align 64
  %bc = bitcast <256 x i32> %vec to x86_amx
  tail call void @llvm.x86.tilestored64.internal(i16 16, i16 16, i8* %dst, i64 64, x86_amx %bc)
  ret void
}

; Prohibit pointer cast for amx.
define dso_local void @test_amx_load_store2(<256 x i32>* %dst, i8* %src) {
; CHECK-LABEL: @test_amx_load_store2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[AMX:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, i8* [[SRC:%.*]], i64 64)
; CHECK-NEXT:    [[BC:%.*]] = bitcast x86_amx [[AMX]] to <256 x i32>
; CHECK-NEXT:    store <256 x i32> [[BC]], <256 x i32>* [[DST:%.*]], align 1024
; CHECK-NEXT:    ret void
;
entry:
  %amx = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, i8* %src, i64 64)
  %bc = bitcast x86_amx %amx to <256 x i32>
  store <256 x i32> %bc, <256 x i32>* %dst
  ret void
}

declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)