; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s

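; The alloca of a fixed-size array is accessed through a cast to a scalable
; vector pointer. The two sizes cannot be proven equal at compile time, so
; the cast and the volatile accesses are left untouched.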
define void @fixed_array16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
; CHECK-LABEL: @fixed_array16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca [16 x i32], align 16
; CHECK-NEXT:    [[CAST:%.*]] = bitcast [16 x i32]* [[TMP]] to <vscale x 4 x i32>*
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca [16 x i32], align 16
  %cast = bitcast [16 x i32]* %tmp to <vscale x 4 x i32>*
  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
  ret void
}

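; A scalable alloca accessed as a fixed-width vector: the cast is kept, but
; the alignment of the alloca and of the accesses through the cast is raised
; to 64.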
define void @scalable4i32_to_fixed16i32(<16 x i32>* %out) {
; CHECK-LABEL: @scalable4i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 4 x i32>* [[TMP]] to <16 x i32>*
; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 4 x i32>, align 16
  %cast = bitcast <vscale x 4 x i32>* %tmp to <16 x i32>*
  store <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
  store <16 x i32> %reload, <16 x i32>* %out, align 16
  ret void
}

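; A fixed-width vector alloca accessed through a scalable vector pointer:
; again, nothing can be simplified.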
define void @fixed16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
; CHECK-LABEL: @fixed16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <16 x i32>, align 16
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <16 x i32>* [[TMP]] to <vscale x 4 x i32>*
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <16 x i32>, align 16
  %cast = bitcast <16 x i32>* %tmp to <vscale x 4 x i32>*
  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
  ret void
}

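; A scalable alloca whose minimum size matches the fixed-width access type:
; the cast is still needed, and only the alignment is raised.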
define void @scalable16i32_to_fixed16i32(<16 x i32>* %out) {
; CHECK-LABEL: @scalable16i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 16 x i32>* [[TMP]] to <16 x i32>*
; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 16 x i32>, align 16
  %cast = bitcast <vscale x 16 x i32>* %tmp to <16 x i32>*
  store volatile <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
  store <16 x i32> %reload, <16 x i32>* %out, align 16
  ret void
}

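; Both types are scalable but have different sizes, so the cast between them
; is kept.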
define void @scalable32i32_to_scalable16i32(<vscale x 16 x i32>* %out) {
; CHECK-LABEL: @scalable32i32_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i32>* [[TMP]] to <vscale x 16 x i32>*
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i32>, align 16
  %cast = bitcast <vscale x 32 x i32>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  ret void
}

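; <vscale x 32 x i16> and <vscale x 16 x i32> have the same size, so the
; bitcast folds away and the alloca is rewritten in terms of the accessed
; type.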
define void @scalable32i16_to_scalable16i32(<vscale x 16 x i32>* %out) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[TMP]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[TMP]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  ret void
}

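; As above, but the alloca is also accessed with its original type, so the
; alloca type is kept and the cast remains for the i32 accesses.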
define void @scalable32i16_to_scalable16i32_multiuse(<vscale x 16 x i32>* %out, <vscale x 32 x i16>* %out2) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32_multiuse(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i16>* [[TMP]] to <vscale x 16 x i32>*
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* [[TMP]], align 64
; CHECK-NEXT:    store <vscale x 32 x i16> [[RELOAD2]], <vscale x 32 x i16>* [[OUT2:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  %reload2 = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* %tmp, align 16
  store <vscale x 32 x i16> %reload2, <vscale x 32 x i16>* %out2, align 16
  ret void
}