; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck %s

; Currently we only lower stores with shape information, but we need to embed
; the matrix in a flat vector for function calls and returns.
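;
; Sketch of the expected lowering (assuming the intrinsics' default column-major
; layout): %in holds a 4x2 matrix, so column 0 is elements 0-3 and column 1 is
; elements 4-7. The transpose produces a 2x4 matrix, and the plain store of it
; is lowered to four <2 x double> column stores at stride 2, while the call to
; @foo and the return keep using the flattened <8 x double> vector.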
define <8 x double> @strided_load_4x4(<8 x double> %in, <8 x double>* %Ptr) {
; CHECK-LABEL: @strided_load_4x4(
; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <8 x double> [[IN:%.*]], <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <8 x double> [[IN]], <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x double> [[SPLIT]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[TMP1]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP2]], double [[TMP3]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <4 x double> [[SPLIT]], i64 1
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x double> undef, double [[TMP5]], i64 0
; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 1
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x double> [[TMP6]], double [[TMP7]], i64 1
; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <4 x double> [[SPLIT]], i64 2
; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <2 x double> undef, double [[TMP9]], i64 0
; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 2
; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <2 x double> [[TMP10]], double [[TMP11]], i64 1
; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <4 x double> [[SPLIT]], i64 3
; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <2 x double> undef, double [[TMP13]], i64 0
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 3
; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <2 x double> [[TMP14]], double [[TMP15]], i64 1
; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> [[TMP8]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[TMP18:%.*]] = shufflevector <2 x double> [[TMP12]], <2 x double> [[TMP16]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[TMP19:%.*]] = shufflevector <4 x double> [[TMP17]], <4 x double> [[TMP18]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x double>* [[PTR:%.*]] to double*
; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP20]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[VEC_CAST]], align 8
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP20]], i64 2
; CHECK-NEXT:    [[VEC_CAST2:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP8]], <2 x double>* [[VEC_CAST2]], align 8
; CHECK-NEXT:    [[VEC_GEP3:%.*]] = getelementptr double, double* [[TMP20]], i64 4
; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast double* [[VEC_GEP3]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP12]], <2 x double>* [[VEC_CAST4]], align 8
; CHECK-NEXT:    [[VEC_GEP5:%.*]] = getelementptr double, double* [[TMP20]], i64 6
; CHECK-NEXT:    [[VEC_CAST6:%.*]] = bitcast double* [[VEC_GEP5]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP16]], <2 x double>* [[VEC_CAST6]], align 8
; CHECK-NEXT:    call void @foo(<8 x double> [[TMP19]])
; CHECK-NEXT:    ret <8 x double> [[TMP19]]
;
  %transposed = call <8 x double> @llvm.matrix.transpose.v8f64(<8 x double> %in, i32 4, i32 2)
  store <8 x double> %transposed, <8 x double>* %Ptr, align 8
  call void @foo(<8 x double> %transposed)
  ret <8 x double> %transposed
}

declare <8 x double> @llvm.matrix.transpose.v8f64(<8 x double>, i32, i32)

declare void @foo(<8 x double>)