// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=1 -mvscale-max=1 -S -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-64
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=4 -mvscale-max=4 -S -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-256

// REQUIRES: riscv-registered-target
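// With zve64d, vscale counts 64-bit blocks, so -mvscale-min/max=1 pins
// __riscv_v_fixed_vlen at 64 bits and -mvscale-min/max=4 pins it at 256 bits;
// the CHECK-64 and CHECK-256 prefixes below refer to those two configurations.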
#include <stdint.h>

typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float64m1_t vfloat64m1_t;
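
// The riscv_rvv_vector_bits attribute gives the scalable type a fixed size of
// __riscv_v_fixed_vlen bits, so objects of the type can be defined at file
// scope, as with global_i64 below; sizeless scalable types cannot be globals.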
typedef vint64m1_t fixed_int64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));

fixed_int64m1_t global_i64;

//===----------------------------------------------------------------------===//
// WRITES
//===----------------------------------------------------------------------===//

// CHECK-64-LABEL: @write_global_i64(
// CHECK-64-NEXT:  entry:
// CHECK-64-NEXT:    [[CASTFIXEDSVE:%.*]] = tail call <1 x i64> @llvm.vector.extract.v1i64.nxv1i64(<vscale x 1 x i64> [[V:%.*]], i64 0)
// CHECK-64-NEXT:    store <1 x i64> [[CASTFIXEDSVE]], ptr @global_i64, align 8, !tbaa [[TBAA4:![0-9]+]]
// CHECK-64-NEXT:    ret void
//
// CHECK-256-LABEL: @write_global_i64(
// CHECK-256-NEXT:  entry:
// CHECK-256-NEXT:    [[CASTFIXEDSVE:%.*]] = tail call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[V:%.*]], i64 0)
// CHECK-256-NEXT:    store <4 x i64> [[CASTFIXEDSVE]], ptr @global_i64, align 8, !tbaa [[TBAA4:![0-9]+]]
// CHECK-256-NEXT:    ret void
//
void write_global_i64(vint64m1_t v) { global_i64 = v; }

//===----------------------------------------------------------------------===//
// READS
//===----------------------------------------------------------------------===//

// CHECK-64-LABEL: @read_global_i64(
// CHECK-64-NEXT:  entry:
// CHECK-64-NEXT:    [[TMP0:%.*]] = load <1 x i64>, ptr @global_i64, align 8, !tbaa [[TBAA4]]
// CHECK-64-NEXT:    [[CASTSCALABLESVE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v1i64(<vscale x 1 x i64> undef, <1 x i64> [[TMP0]], i64 0)
// CHECK-64-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
// CHECK-256-LABEL: @read_global_i64(
// CHECK-256-NEXT:  entry:
// CHECK-256-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr @global_i64, align 8, !tbaa [[TBAA4]]
// CHECK-256-NEXT:    [[CASTSCALABLESVE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[TMP0]], i64 0)
// CHECK-256-NEXT:    ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
vint64m1_t read_global_i64() { return global_i64; }
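
// Illustrative sketch (not part of the checked output above): one way a
// fixed-length RVV global like global_i64 might be read, updated with the RVV
// intrinsics, and written back. Assumes <riscv_vector.h> and the __riscv_*
// spellings from the v1.0 intrinsics API; bump_global_i64 is a hypothetical
// helper that this test does not check.
#include <riscv_vector.h>

void bump_global_i64(void) {
  size_t vl = __riscv_vsetvlmax_e64m1();  // all lanes of an m1 register
  vint64m1_t v = global_i64;              // fixed-length -> scalable
  v = __riscv_vadd_vx_i64m1(v, 1, vl);    // add 1 to every element
  global_i64 = v;                         // scalable -> fixed-length
}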