// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=1 -mvscale-max=1 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-64
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=2 -mvscale-max=2 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-128
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=4 -mvscale-max=4 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-256

// REQUIRES: riscv-registered-target
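
// The three RUN lines pin vscale to 1, 2, and 4. With zve64d, vscale is
// counted in 64-bit blocks, so these configurations correspond to VLEN = 64,
// 128, and 256 bits, which is what the CHECK-64, CHECK-128, and CHECK-256
// prefixes denote below.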
typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float64m1_t vfloat64m1_t;

typedef __rvv_bool1_t vbool1_t;
typedef __rvv_bool2_t vbool2_t;
typedef __rvv_bool4_t vbool4_t;
typedef __rvv_bool8_t vbool8_t;
typedef __rvv_bool16_t vbool16_t;
typedef __rvv_bool32_t vbool32_t;
typedef __rvv_bool64_t vbool64_t;
typedef vint64m1_t fixed_int64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vbool1_t fixed_bool1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vbool2_t fixed_bool2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 2)));
typedef vbool4_t fixed_bool4_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 4)));
typedef vbool8_t fixed_bool8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 8)));
#if __riscv_v_fixed_vlen >= 128
typedef vbool16_t fixed_bool16_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 16)));
#endif
#if __riscv_v_fixed_vlen >= 256
typedef vbool32_t fixed_bool32_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 32)));
#endif
#if __riscv_v_fixed_vlen >= 512
typedef vbool64_t fixed_bool64_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 64)));
#endif
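
// riscv_rvv_vector_bits(N) gives a scalable RVV type a fixed size of N bits,
// which makes it usable as a struct member. A vboolN_t mask carries one bit
// per N-bit element, i.e. VLEN/N bits in total, hence the
// __riscv_v_fixed_vlen / N argument on the mask typedefs.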
#define DEFINE_STRUCT(ty)     \
  struct struct_##ty {        \
    fixed_##ty##_t x, y[3];   \
  };
DEFINE_STRUCT(int64m1)
DEFINE_STRUCT(float64m1)
DEFINE_STRUCT(bool1)
DEFINE_STRUCT(bool2)
DEFINE_STRUCT(bool4)
DEFINE_STRUCT(bool8)
#if __riscv_v_fixed_vlen >= 128
DEFINE_STRUCT(bool16)
#endif
#if __riscv_v_fixed_vlen >= 256
DEFINE_STRUCT(bool32)
#endif
#if __riscv_v_fixed_vlen >= 512
DEFINE_STRUCT(bool64)
#endif
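
// Layout note: each fixed-length vector member occupies
// __riscv_v_fixed_vlen / 8 bytes, so the y[] array starts at byte offset 8,
// 16, or 32 for VLEN = 64, 128, or 256 respectively. These are the
// getelementptr immediates the assertions below check for.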
//===----------------------------------------------------------------------===//
// int64
//===----------------------------------------------------------------------===//

// CHECK-64-LABEL: @read_int64m1(
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load <1 x i64>, ptr [[Y]], align 8, !tbaa [[TBAA6:![0-9]+]]
// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v1i64(<vscale x 1 x i64> poison, <1 x i64> [[TMP0]], i64 0)
// CHECK-64-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
// CHECK-128-LABEL: @read_int64m1(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr [[Y]], align 8, !tbaa [[TBAA6:![0-9]+]]
// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v2i64(<vscale x 1 x i64> poison, <2 x i64> [[TMP0]], i64 0)
// CHECK-128-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
// CHECK-256-LABEL: @read_int64m1(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr [[Y]], align 8, !tbaa [[TBAA6:![0-9]+]]
// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[TMP0]], i64 0)
// CHECK-256-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
vint64m1_t read_int64m1(struct struct_int64m1 *s) {
  return s->y[0];
}
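
// Reading a fixed-length member out as a scalable type lowers to a
// fixed-width load plus an @llvm.vector.insert into a poison scalable
// vector, as the assertions above show.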
// CHECK-64-LABEL: @write_int64m1(
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[CAST_FIXED:%.*]] = tail call <1 x i64> @llvm.vector.extract.v1i64.nxv1i64(<vscale x 1 x i64> [[X:%.*]], i64 0)
// CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
// CHECK-64-NEXT: store <1 x i64> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-64-NEXT: ret void
//
// CHECK-128-LABEL: @write_int64m1(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[CAST_FIXED:%.*]] = tail call <2 x i64> @llvm.vector.extract.v2i64.nxv1i64(<vscale x 1 x i64> [[X:%.*]], i64 0)
// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
// CHECK-128-NEXT: store <2 x i64> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-128-NEXT: ret void
//
// CHECK-256-LABEL: @write_int64m1(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[X:%.*]], i64 0)
// CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
// CHECK-256-NEXT: store <4 x i64> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: ret void
//
void write_int64m1(struct struct_int64m1 *s, vint64m1_t x) {
  s->y[0] = x;
}
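
// The store direction is symmetric: @llvm.vector.extract pulls the
// fixed-width subvector out of the scalable argument before the store.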
//===----------------------------------------------------------------------===//
// float64
//===----------------------------------------------------------------------===//

// CHECK-64-LABEL: @read_float64m1(
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load <1 x double>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v1f64(<vscale x 1 x double> poison, <1 x double> [[TMP0]], i64 0)
// CHECK-64-NEXT: ret <vscale x 1 x double> [[CAST_SCALABLE]]
//
// CHECK-128-LABEL: @read_float64m1(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v2f64(<vscale x 1 x double> poison, <2 x double> [[TMP0]], i64 0)
// CHECK-128-NEXT: ret <vscale x 1 x double> [[CAST_SCALABLE]]
//
// CHECK-256-LABEL: @read_float64m1(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> poison, <4 x double> [[TMP0]], i64 0)
// CHECK-256-NEXT: ret <vscale x 1 x double> [[CAST_SCALABLE]]
//
vfloat64m1_t read_float64m1(struct struct_float64m1 *s) {
  return s->y[0];
}

// CHECK-64-LABEL: @write_float64m1(
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[CAST_FIXED:%.*]] = tail call <1 x double> @llvm.vector.extract.v1f64.nxv1f64(<vscale x 1 x double> [[X:%.*]], i64 0)
// CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
// CHECK-64-NEXT: store <1 x double> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-64-NEXT: ret void
//
// CHECK-128-LABEL: @write_float64m1(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[CAST_FIXED:%.*]] = tail call <2 x double> @llvm.vector.extract.v2f64.nxv1f64(<vscale x 1 x double> [[X:%.*]], i64 0)
// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
// CHECK-128-NEXT: store <2 x double> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-128-NEXT: ret void
//
// CHECK-256-LABEL: @write_float64m1(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[X:%.*]], i64 0)
// CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
// CHECK-256-NEXT: store <4 x double> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: ret void
//
void write_float64m1(struct struct_float64m1 *s, vfloat64m1_t x) {
  s->y[0] = x;
}

//===----------------------------------------------------------------------===//
// bool
//===----------------------------------------------------------------------===//

// CHECK-64-LABEL: @read_bool1(
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> poison, <8 x i8> [[TMP0]], i64 0)
// CHECK-64-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-64-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
// CHECK-128-LABEL: @read_bool1(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
// CHECK-128-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> poison, <16 x i8> [[TMP0]], i64 0)
// CHECK-128-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-128-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
// CHECK-256-LABEL: @read_bool1(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
// CHECK-256-NEXT: [[TMP0:%.*]] = load <32 x i8>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[TMP0]], i64 0)
// CHECK-256-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-256-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
vbool1_t read_bool1(struct struct_bool1 *s) {
  return s->y[0];
}
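
// Mask types take one extra step: the in-memory form is a <VLEN/8 x i8>
// vector, which is bitcast to and from the <vscale x 64 x i1> mask value, as
// the assertions above show.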
// CHECK-64-LABEL: @write_bool1(
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[X:%.*]] to <vscale x 8 x i8>
// CHECK-64-NEXT: [[CAST_FIXED:%.*]] = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
// CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
// CHECK-64-NEXT: store <8 x i8> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-64-NEXT: ret void
//
// CHECK-128-LABEL: @write_bool1(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[X:%.*]] to <vscale x 8 x i8>
// CHECK-128-NEXT: [[CAST_FIXED:%.*]] = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
// CHECK-128-NEXT: store <16 x i8> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-128-NEXT: ret void
//
// CHECK-256-LABEL: @write_bool1(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[X:%.*]] to <vscale x 8 x i8>
// CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
// CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
// CHECK-256-NEXT: store <32 x i8> [[CAST_FIXED]], ptr [[Y]], align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: ret void
//
void write_bool1(struct struct_bool1 *s, vbool1_t x) {
  s->y[0] = x;
}