; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -lower-matrix-intrinsics -S < %s | FileCheck %s
; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck %s
; Currently we only lower stores with shape information, but need to embed the
; matrix in a flat vector for function calls and returns.
define <8 x double> @strided_load_4x4(<8 x double> %in, <8 x double>* %Ptr) {
; CHECK-LABEL: @strided_load_4x4(
; CHECK-NEXT: [[SPLIT:%.*]] = shufflevector <8 x double> [[IN:%.*]], <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[SPLIT1:%.*]] = shufflevector <8 x double> [[IN]], <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x double> [[SPLIT]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> undef, double [[TMP1]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[TMP2]], double [[TMP3]], i64 1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x double> [[SPLIT]], i64 1
; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x double> undef, double [[TMP5]], i64 0
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 1
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP6]], double [[TMP7]], i64 1
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x double> [[SPLIT]], i64 2
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x double> undef, double [[TMP9]], i64 0
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 2
; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x double> [[TMP10]], double [[TMP11]], i64 1
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x double> [[SPLIT]], i64 3
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x double> undef, double [[TMP13]], i64 0
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 3
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <2 x double> [[TMP14]], double [[TMP15]], i64 1
; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> [[TMP8]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <2 x double> [[TMP12]], <2 x double> [[TMP16]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <4 x double> [[TMP17]], <4 x double> [[TMP18]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x double>* [[PTR:%.*]] to double*
; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP20]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[VEC_CAST]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP20]], i64 2
; CHECK-NEXT: [[VEC_CAST2:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP8]], <2 x double>* [[VEC_CAST2]], align 8
; CHECK-NEXT: [[VEC_GEP3:%.*]] = getelementptr double, double* [[TMP20]], i64 4
; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[VEC_GEP3]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP12]], <2 x double>* [[VEC_CAST4]], align 8
; CHECK-NEXT: [[VEC_GEP5:%.*]] = getelementptr double, double* [[TMP20]], i64 6
; CHECK-NEXT: [[VEC_CAST6:%.*]] = bitcast double* [[VEC_GEP5]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP16]], <2 x double>* [[VEC_CAST6]], align 8
; CHECK-NEXT: call void @foo(<8 x double> [[TMP19]])
; CHECK-NEXT: ret <8 x double> [[TMP19]]
;
  ; Transpose the 4x2 input; the store below carries shape info and is lowered
  ; to per-column <2 x double> stores, while the call/return use the flattened
  ; <8 x double> embedding (TMP19 above).
  %transposed = call <8 x double> @llvm.matrix.transpose.v8f64(<8 x double> %in, i32 4, i32 2)
  store <8 x double> %transposed, <8 x double>* %Ptr, align 8
  call void @foo(<8 x double> %transposed)
  ret <8 x double> %transposed
}

; Overloaded matrix intrinsics must use a type-mangled name (.v8f64) to pass
; the IR verifier.
declare <8 x double> @llvm.matrix.transpose.v8f64(<8 x double>, i32, i32)

declare void @foo(<8 x double>)