// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +bf16 -mvscale-min=1 -mvscale-max=1 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-128
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +bf16 -mvscale-min=2 -mvscale-max=2 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-256
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +bf16 -mvscale-min=4 -mvscale-max=4 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-512
// REQUIRES: aarch64-registered-target
// N is the fixed SVE register width in bits, as selected by the
// -mvscale-min/-mvscale-max RUN lines (128/256/512 above).
// Fix: a stray extraction artifact ("10") preceded '#', so the line was not
// a valid preprocessor directive.
#define N __ARM_FEATURE_SVE_BITS
12 typedef svint64_t fixed_int64_t
__attribute__((arm_sve_vector_bits(N
)));
13 typedef svfloat64_t fixed_float64_t
__attribute__((arm_sve_vector_bits(N
)));
14 typedef svbfloat16_t fixed_bfloat16_t
__attribute__((arm_sve_vector_bits(N
)));
15 typedef svbool_t fixed_bool_t
__attribute__((arm_sve_vector_bits(N
)));
// Declares `struct struct_<ty>` holding one fixed-length vector `x` followed
// by a three-element array `y` of the same type; the read/write test
// functions below load/store through `y`, exercising the struct layout of
// the fixed-length SVE types.
// Fix: removed the fused original-line-number tokens that broke the
// directive. NOTE(review): the macro's closing line (the `} ...;` after the
// member list) lies outside this excerpt; the trailing '\' continuation is
// kept intact rather than guessing at the missing text.
#define DEFINE_STRUCT(ty) \
  struct struct_##ty {    \
    fixed_##ty##_t x, y[3]; \
23 DEFINE_STRUCT(float64
)
24 DEFINE_STRUCT(bfloat16
)
27 //===----------------------------------------------------------------------===//
29 //===----------------------------------------------------------------------===//
31 // CHECK-128-LABEL: @read_int64(
32 // CHECK-128-NEXT: entry:
33 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
34 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr [[Y]], align 16, !tbaa [[TBAA2:![0-9]+]]
35 // CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP0]], i64 0)
36 // CHECK-128-NEXT: ret <vscale x 2 x i64> [[CAST_SCALABLE]]
38 // CHECK-256-LABEL: @read_int64(
39 // CHECK-256-NEXT: entry:
40 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
41 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr [[Y]], align 16, !tbaa [[TBAA2:![0-9]+]]
42 // CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP0]], i64 0)
43 // CHECK-256-NEXT: ret <vscale x 2 x i64> [[CAST_SCALABLE]]
45 // CHECK-512-LABEL: @read_int64(
46 // CHECK-512-NEXT: entry:
47 // CHECK-512-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 64
48 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr [[Y]], align 16, !tbaa [[TBAA2:![0-9]+]]
49 // CHECK-512-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[TMP0]], i64 0)
50 // CHECK-512-NEXT: ret <vscale x 2 x i64> [[CAST_SCALABLE]]
52 svint64_t
read_int64(struct struct_int64
*s
) {
56 // CHECK-128-LABEL: @write_int64(
57 // CHECK-128-NEXT: entry:
58 // CHECK-128-NEXT: [[CAST_FIXED:%.*]] = tail call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
59 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
60 // CHECK-128-NEXT: store <2 x i64> [[CAST_FIXED]], ptr [[Y]], align 16, !tbaa [[TBAA2]]
61 // CHECK-128-NEXT: ret void
63 // CHECK-256-LABEL: @write_int64(
64 // CHECK-256-NEXT: entry:
65 // CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
66 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
67 // CHECK-256-NEXT: store <4 x i64> [[CAST_FIXED]], ptr [[Y]], align 16, !tbaa [[TBAA2]]
68 // CHECK-256-NEXT: ret void
70 // CHECK-512-LABEL: @write_int64(
71 // CHECK-512-NEXT: entry:
72 // CHECK-512-NEXT: [[CAST_FIXED:%.*]] = tail call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
73 // CHECK-512-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 64
74 // CHECK-512-NEXT: store <8 x i64> [[CAST_FIXED]], ptr [[Y]], align 16, !tbaa [[TBAA2]]
75 // CHECK-512-NEXT: ret void
77 void write_int64(struct struct_int64
*s
, svint64_t x
) {
81 //===----------------------------------------------------------------------===//
83 //===----------------------------------------------------------------------===//
85 // CHECK-128-LABEL: @read_float64(
86 // CHECK-128-NEXT: entry:
87 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
88 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[Y]], align 16, !tbaa [[TBAA2]]
89 // CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP0]], i64 0)
90 // CHECK-128-NEXT: ret <vscale x 2 x double> [[CAST_SCALABLE]]
92 // CHECK-256-LABEL: @read_float64(
93 // CHECK-256-NEXT: entry:
94 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
95 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr [[Y]], align 16, !tbaa [[TBAA2]]
96 // CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP0]], i64 0)
97 // CHECK-256-NEXT: ret <vscale x 2 x double> [[CAST_SCALABLE]]
99 // CHECK-512-LABEL: @read_float64(
100 // CHECK-512-NEXT: entry:
101 // CHECK-512-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 64
102 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x double>, ptr [[Y]], align 16, !tbaa [[TBAA2]]
103 // CHECK-512-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TMP0]], i64 0)
104 // CHECK-512-NEXT: ret <vscale x 2 x double> [[CAST_SCALABLE]]
106 svfloat64_t
read_float64(struct struct_float64
*s
) {
110 // CHECK-128-LABEL: @write_float64(
111 // CHECK-128-NEXT: entry:
112 // CHECK-128-NEXT: [[CAST_FIXED:%.*]] = tail call <2 x double> @llvm.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
113 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
114 // CHECK-128-NEXT: store <2 x double> [[CAST_FIXED]], ptr [[Y]], align 16, !tbaa [[TBAA2]]
115 // CHECK-128-NEXT: ret void
117 // CHECK-256-LABEL: @write_float64(
118 // CHECK-256-NEXT: entry:
119 // CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <4 x double> @llvm.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
120 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
121 // CHECK-256-NEXT: store <4 x double> [[CAST_FIXED]], ptr [[Y]], align 16, !tbaa [[TBAA2]]
122 // CHECK-256-NEXT: ret void
124 // CHECK-512-LABEL: @write_float64(
125 // CHECK-512-NEXT: entry:
126 // CHECK-512-NEXT: [[CAST_FIXED:%.*]] = tail call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
127 // CHECK-512-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 64
128 // CHECK-512-NEXT: store <8 x double> [[CAST_FIXED]], ptr [[Y]], align 16, !tbaa [[TBAA2]]
129 // CHECK-512-NEXT: ret void
131 void write_float64(struct struct_float64
*s
, svfloat64_t x
) {
135 //===----------------------------------------------------------------------===//
137 //===----------------------------------------------------------------------===//
139 // CHECK-128-LABEL: @read_bfloat16(
140 // CHECK-128-NEXT: entry:
141 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
142 // CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, ptr [[Y]], align 16, !tbaa [[TBAA2]]
143 // CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP0]], i64 0)
144 // CHECK-128-NEXT: ret <vscale x 8 x bfloat> [[CAST_SCALABLE]]
146 // CHECK-256-LABEL: @read_bfloat16(
147 // CHECK-256-NEXT: entry:
148 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
149 // CHECK-256-NEXT: [[TMP0:%.*]] = load <16 x bfloat>, ptr [[Y]], align 16, !tbaa [[TBAA2]]
150 // CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v16bf16(<vscale x 8 x bfloat> undef, <16 x bfloat> [[TMP0]], i64 0)
151 // CHECK-256-NEXT: ret <vscale x 8 x bfloat> [[CAST_SCALABLE]]
153 // CHECK-512-LABEL: @read_bfloat16(
154 // CHECK-512-NEXT: entry:
155 // CHECK-512-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 64
156 // CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, ptr [[Y]], align 16, !tbaa [[TBAA2]]
157 // CHECK-512-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v32bf16(<vscale x 8 x bfloat> undef, <32 x bfloat> [[TMP0]], i64 0)
158 // CHECK-512-NEXT: ret <vscale x 8 x bfloat> [[CAST_SCALABLE]]
160 svbfloat16_t
read_bfloat16(struct struct_bfloat16
*s
) {
164 // CHECK-128-LABEL: @write_bfloat16(
165 // CHECK-128-NEXT: entry:
166 // CHECK-128-NEXT: [[CAST_FIXED:%.*]] = tail call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
167 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
168 // CHECK-128-NEXT: store <8 x bfloat> [[CAST_FIXED]], ptr [[Y]], align 16, !tbaa [[TBAA2]]
169 // CHECK-128-NEXT: ret void
171 // CHECK-256-LABEL: @write_bfloat16(
172 // CHECK-256-NEXT: entry:
173 // CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <16 x bfloat> @llvm.vector.extract.v16bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
174 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
175 // CHECK-256-NEXT: store <16 x bfloat> [[CAST_FIXED]], ptr [[Y]], align 16, !tbaa [[TBAA2]]
176 // CHECK-256-NEXT: ret void
178 // CHECK-512-LABEL: @write_bfloat16(
179 // CHECK-512-NEXT: entry:
180 // CHECK-512-NEXT: [[CAST_FIXED:%.*]] = tail call <32 x bfloat> @llvm.vector.extract.v32bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
181 // CHECK-512-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 64
182 // CHECK-512-NEXT: store <32 x bfloat> [[CAST_FIXED]], ptr [[Y]], align 16, !tbaa [[TBAA2]]
183 // CHECK-512-NEXT: ret void
185 void write_bfloat16(struct struct_bfloat16
*s
, svbfloat16_t x
) {
189 //===----------------------------------------------------------------------===//
191 //===----------------------------------------------------------------------===//
193 // CHECK-128-LABEL: @read_bool(
194 // CHECK-128-NEXT: entry:
195 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 2
196 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, ptr [[Y]], align 2, !tbaa [[TBAA2]]
197 // CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> [[TMP0]], i64 0)
198 // CHECK-128-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
199 // CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP1]]
201 // CHECK-256-LABEL: @read_bool(
202 // CHECK-256-NEXT: entry:
203 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 4
204 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[Y]], align 2, !tbaa [[TBAA2]]
205 // CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> [[TMP0]], i64 0)
206 // CHECK-256-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
207 // CHECK-256-NEXT: ret <vscale x 16 x i1> [[TMP1]]
209 // CHECK-512-LABEL: @read_bool(
210 // CHECK-512-NEXT: entry:
211 // CHECK-512-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
212 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[Y]], align 2, !tbaa [[TBAA2]]
213 // CHECK-512-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP0]], i64 0)
214 // CHECK-512-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
215 // CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP1]]
217 svbool_t
read_bool(struct struct_bool
*s
) {
221 // CHECK-128-LABEL: @write_bool(
222 // CHECK-128-NEXT: entry:
223 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[X:%.*]] to <vscale x 2 x i8>
224 // CHECK-128-NEXT: [[CAST_FIXED:%.*]] = tail call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
225 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 2
226 // CHECK-128-NEXT: store <2 x i8> [[CAST_FIXED]], ptr [[Y]], align 2, !tbaa [[TBAA2]]
227 // CHECK-128-NEXT: ret void
229 // CHECK-256-LABEL: @write_bool(
230 // CHECK-256-NEXT: entry:
231 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[X:%.*]] to <vscale x 2 x i8>
232 // CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
233 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 4
234 // CHECK-256-NEXT: store <4 x i8> [[CAST_FIXED]], ptr [[Y]], align 2, !tbaa [[TBAA2]]
235 // CHECK-256-NEXT: ret void
237 // CHECK-512-LABEL: @write_bool(
238 // CHECK-512-NEXT: entry:
239 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[X:%.*]] to <vscale x 2 x i8>
240 // CHECK-512-NEXT: [[CAST_FIXED:%.*]] = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
241 // CHECK-512-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
242 // CHECK-512-NEXT: store <8 x i8> [[CAST_FIXED]], ptr [[Y]], align 2, !tbaa [[TBAA2]]
243 // CHECK-512-NEXT: ret void
245 void write_bool(struct struct_bool
*s
, svbool_t x
) {