// RUN: %clang_cc1 -x c++ -triple aarch64-none-linux-gnu -target-feature +sve -S -O1 -Werror -Wall -emit-llvm -o - %s -mvscale-min=1 -mvscale-max=1 | FileCheck %s -D#VBITS=128 --check-prefixes=CHECK,CHECK128
// RUN: %clang_cc1 -x c++ -triple aarch64-none-linux-gnu -target-feature +sve -S -O1 -Werror -Wall -emit-llvm -o - %s -mvscale-min=2 -mvscale-max=2 | FileCheck %s -D#VBITS=256 --check-prefixes=CHECK,CHECKWIDE
// RUN: %clang_cc1 -x c++ -triple aarch64-none-linux-gnu -target-feature +sve -S -O1 -Werror -Wall -emit-llvm -o - %s -mvscale-min=4 -mvscale-max=4 | FileCheck %s -D#VBITS=512 --check-prefixes=CHECK,CHECKWIDE
// RUN: %clang_cc1 -x c++ -triple aarch64-none-linux-gnu -target-feature +sve -S -O1 -Werror -Wall -emit-llvm -o - %s -mvscale-min=8 -mvscale-max=8 | FileCheck %s -D#VBITS=1024 --check-prefixes=CHECK,CHECKWIDE
// RUN: %clang_cc1 -x c++ -triple aarch64-none-linux-gnu -target-feature +sve -S -O1 -Werror -Wall -emit-llvm -o - %s -mvscale-min=16 -mvscale-max=16 | FileCheck %s -D#VBITS=2048 --check-prefixes=CHECK,CHECKWIDE
// REQUIRES: aarch64-registered-target

// Examples taken from section "3.7.3.3 Behavior specific to SVE
// vectors" of the SVE ACLE (Version 00bet6) that can be found at
// https://developer.arm.com/documentation/100987/latest
//
// The examples have been expanded to work with multiple values of
// -mvscale-{min,max}.

#include <arm_sve.h>

// Page 26, first paragraph of 3.7.3.3: sizeof and alignof
#if __ARM_FEATURE_SVE_BITS
#define N __ARM_FEATURE_SVE_BITS
typedef svfloat32_t fixed_svfloat __attribute__((arm_sve_vector_bits(N)));
static_assert(alignof(fixed_svfloat) == 16,
              "Invalid align of Vector Length Specific Type.");
static_assert(sizeof(fixed_svfloat) == N / 8,
              "Invalid size of Vector Length Specific Type.");
#undef N
#endif

// Page 26, items 1 and 2 of 3.7.3.3: how VLST and GNUT are related.
#if __ARM_FEATURE_SVE_BITS && __ARM_FEATURE_SVE_VECTOR_OPERATORS
#define N __ARM_FEATURE_SVE_BITS
typedef svfloat64_t fixed_svfloat64 __attribute__((arm_sve_vector_bits(N)));
typedef float64_t gnufloat64 __attribute__((vector_size(N / 8)));
static_assert(alignof(fixed_svfloat64) == alignof(gnufloat64),
              "Align of Vector Length Specific Type and GNU Vector Types "
              "should be the same.");
static_assert(sizeof(fixed_svfloat64) == sizeof(gnufloat64),
              "Size of Vector Length Specific Type and GNU Vector Types "
              "should be the same.");
#undef N
#endif

#if __ARM_FEATURE_SVE_BITS && __ARM_FEATURE_SVE_VECTOR_OPERATORS
#define N __ARM_FEATURE_SVE_BITS
// CHECK-LABEL: define{{.*}} <vscale x 4 x i32> @_Z1f9__SVE_VLSIu11__SVInt32_tLj
// CHECK-SAME: [[#VBITS]]
// CHECK-SAME: EES_(<vscale x 4 x i32> noundef %x.coerce, <vscale x 4 x i32> noundef %y.coerce)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[X:%.*]] = tail call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[Y:%.*]] = tail call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE1:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <[[#div(VBITS, 32)]] x i32> [[Y]], [[X]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32(<vscale x 4 x i32> undef, <[[#div(VBITS, 32)]] x i32> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
typedef svint32_t vec __attribute__((arm_sve_vector_bits(N)));
auto f(vec x, vec y) { return x + y; } // Returns a vec.
#undef N
#endif
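
// A minimal usage sketch (not part of the original test; the name
// use_f_sketch is hypothetical): f's deduced return type is the fixed-length
// vec, so the sum can be stored in and returned as an ordinary vec object.
// Declared inline and left unused so it is not emitted into the checked IR.
#if __ARM_FEATURE_SVE_BITS && __ARM_FEATURE_SVE_VECTOR_OPERATORS
inline vec use_f_sketch(vec a, vec b) {
  vec sum = f(a, b); // Deduced as vec, per the "Returns a vec." comment.
  return sum;
}
#endif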

// Page 27, item 3, adapted for a generic value of __ARM_FEATURE_SVE_BITS
#if __ARM_FEATURE_SVE_BITS && __ARM_FEATURE_SVE_VECTOR_OPERATORS
#define N __ARM_FEATURE_SVE_BITS
typedef int16_t vec1 __attribute__((vector_size(N / 8)));
void f(vec1);
typedef svint16_t vec2 __attribute__((arm_sve_vector_bits(N)));
// CHECK-LABEL: define{{.*}} void @_Z1g9__SVE_VLSIu11__SVInt16_tLj
// CHECK-SAME: [[#VBITS]]
// CHECK-SAME: EE(<vscale x 8 x i16> noundef %x.coerce)
// CHECK-NEXT: entry:
// CHECK128-NEXT: [[X:%.*]] = tail call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
// CHECK128-NEXT: call void @_Z1fDv8_s(<8 x i16> noundef [[X]]) [[ATTR5:#.*]]
// CHECK128-NEXT: ret void
// CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16
// CHECKWIDE-NEXT: [[X:%.*]] = tail call <[[#div(VBITS, 16)]] x i16> @llvm.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
// CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]]
// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(ptr noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
// CHECKWIDE-NEXT: ret void
void g(vec2 x) { f(x); } // OK
#undef N
#endif
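
// A minimal sketch (not part of the original test; the name h_sketch is
// hypothetical): the conversion g relies on, spelled out explicitly by
// storing the VLST argument into a GNU vector object before the call.
// Declared inline and left unused so it is not emitted into the checked IR.
#if __ARM_FEATURE_SVE_BITS && __ARM_FEATURE_SVE_VECTOR_OPERATORS
inline void h_sketch(vec2 x) {
  vec1 tmp = x; // VLST converts to the same-sized GNU vector type.
  f(tmp);
}
#endif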