// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +bf16 -mvscale-min=1 -mvscale-max=1 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-128
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +bf16 -mvscale-min=4 -mvscale-max=4 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-512
// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS
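// N is the fixed SVE vector length in bits: 128 for the CHECK-128 run
// (-mvscale-min=1 -mvscale-max=1) and 512 for the CHECK-512 run
// (-mvscale-min=4 -mvscale-max=4).
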
typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(N)));
typedef svbfloat16_t fixed_bfloat16_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));

fixed_int64_t global_i64;
fixed_bfloat16_t global_bf16;
fixed_bool_t global_bool;

//===----------------------------------------------------------------------===//
// WRITES
//===----------------------------------------------------------------------===//
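
// Writing a scalable SVE value to a fixed-length global is expected to lower
// to an llvm.vector.extract of the fixed-width vector followed by an ordinary
// store; for predicates the value is first bitcast to <vscale x 2 x i8>.
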
// CHECK-128-LABEL: @write_global_i64(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = tail call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
// CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], ptr @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-128-NEXT: ret void
//
// CHECK-512-LABEL: @write_global_i64(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = tail call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
// CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], ptr @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-512-NEXT: ret void
//
void write_global_i64(svint64_t v) { global_i64 = v; }

// CHECK-128-LABEL: @write_global_bf16(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = tail call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
// CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], ptr @global_bf16, align 16, !tbaa [[TBAA6]]
// CHECK-128-NEXT: ret void
//
// CHECK-512-LABEL: @write_global_bf16(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = tail call <32 x bfloat> @llvm.vector.extract.v32bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
// CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], ptr @global_bf16, align 16, !tbaa [[TBAA6]]
// CHECK-512-NEXT: ret void
//
void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }

// CHECK-128-LABEL: @write_global_bool(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[V:%.*]] to <vscale x 2 x i8>
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = tail call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
// CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], ptr @global_bool, align 2, !tbaa [[TBAA6]]
// CHECK-128-NEXT: ret void
//
// CHECK-512-LABEL: @write_global_bool(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[V:%.*]] to <vscale x 2 x i8>
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
// CHECK-512-NEXT: store <8 x i8> [[CASTFIXEDSVE]], ptr @global_bool, align 2, !tbaa [[TBAA6]]
// CHECK-512-NEXT: ret void
//
void write_global_bool(svbool_t v) { global_bool = v; }

//===----------------------------------------------------------------------===//
// READS
//===----------------------------------------------------------------------===//
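
// Reading a fixed-length global into a scalable SVE value is expected to lower
// to an ordinary load followed by an llvm.vector.insert into an undef scalable
// vector; for predicates the result is then bitcast back to <vscale x 16 x i1>.
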
// CHECK-128-LABEL: @read_global_i64(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @global_i64, align 16, !tbaa [[TBAA6]]
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP0]], i64 0)
// CHECK-128-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
// CHECK-512-LABEL: @read_global_i64(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr @global_i64, align 16, !tbaa [[TBAA6]]
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[TMP0]], i64 0)
// CHECK-512-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
svint64_t read_global_i64() { return global_i64; }

// CHECK-128-LABEL: @read_global_bf16(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, ptr @global_bf16, align 16, !tbaa [[TBAA6]]
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP0]], i64 0)
// CHECK-128-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
//
// CHECK-512-LABEL: @read_global_bf16(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, ptr @global_bf16, align 16, !tbaa [[TBAA6]]
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v32bf16(<vscale x 8 x bfloat> undef, <32 x bfloat> [[TMP0]], i64 0)
// CHECK-512-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
//
svbfloat16_t read_global_bf16() { return global_bf16; }

// CHECK-128-LABEL: @read_global_bool(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, ptr @global_bool, align 2, !tbaa [[TBAA6]]
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> [[TMP0]], i64 0)
// CHECK-128-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP1]]
//
// CHECK-512-LABEL: @read_global_bool(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr @global_bool, align 2, !tbaa [[TBAA6]]
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP0]], i64 0)
// CHECK-512-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP1]]
//
svbool_t read_global_bool() { return global_bool; }