// clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_index.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -o /dev/null %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -o /dev/null %s
8 #include <arm_sve.h>
10 #if defined __ARM_FEATURE_SME
11 #define MODE_ATTR __arm_streaming
12 #else
13 #define MODE_ATTR
14 #endif
16 // CHECK-LABEL: @test_svindex_s8(
17 // CHECK-NEXT: entry:
18 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 [[BASE:%.*]], i8 [[STEP:%.*]])
19 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
21 // CPP-CHECK-LABEL: @_Z15test_svindex_s8aa(
22 // CPP-CHECK-NEXT: entry:
23 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 [[BASE:%.*]], i8 [[STEP:%.*]])
24 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
26 svint8_t test_svindex_s8(int8_t base, int8_t step) MODE_ATTR
28 return svindex_s8(base, step);
31 // CHECK-LABEL: @test_svindex_s16(
32 // CHECK-NEXT: entry:
33 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 [[BASE:%.*]], i16 [[STEP:%.*]])
34 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
36 // CPP-CHECK-LABEL: @_Z16test_svindex_s16ss(
37 // CPP-CHECK-NEXT: entry:
38 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 [[BASE:%.*]], i16 [[STEP:%.*]])
39 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
41 svint16_t test_svindex_s16(int16_t base, int16_t step) MODE_ATTR
43 return svindex_s16(base, step);
46 // CHECK-LABEL: @test_svindex_s32(
47 // CHECK-NEXT: entry:
48 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 [[BASE:%.*]], i32 [[STEP:%.*]])
49 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
51 // CPP-CHECK-LABEL: @_Z16test_svindex_s32ii(
52 // CPP-CHECK-NEXT: entry:
53 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 [[BASE:%.*]], i32 [[STEP:%.*]])
54 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
56 svint32_t test_svindex_s32(int32_t base, int32_t step) MODE_ATTR
58 return svindex_s32(base, step);
61 // CHECK-LABEL: @test_svindex_s64(
62 // CHECK-NEXT: entry:
63 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 [[BASE:%.*]], i64 [[STEP:%.*]])
64 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
66 // CPP-CHECK-LABEL: @_Z16test_svindex_s64ll(
67 // CPP-CHECK-NEXT: entry:
68 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 [[BASE:%.*]], i64 [[STEP:%.*]])
69 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
71 svint64_t test_svindex_s64(int64_t base, int64_t step) MODE_ATTR
73 return svindex_s64(base, step);
76 // CHECK-LABEL: @test_svindex_u8(
77 // CHECK-NEXT: entry:
78 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 [[BASE:%.*]], i8 [[STEP:%.*]])
79 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
81 // CPP-CHECK-LABEL: @_Z15test_svindex_u8hh(
82 // CPP-CHECK-NEXT: entry:
83 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 [[BASE:%.*]], i8 [[STEP:%.*]])
84 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
86 svuint8_t test_svindex_u8(uint8_t base, uint8_t step) MODE_ATTR
88 return svindex_u8(base, step);
91 // CHECK-LABEL: @test_svindex_u16(
92 // CHECK-NEXT: entry:
93 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 [[BASE:%.*]], i16 [[STEP:%.*]])
94 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
96 // CPP-CHECK-LABEL: @_Z16test_svindex_u16tt(
97 // CPP-CHECK-NEXT: entry:
98 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 [[BASE:%.*]], i16 [[STEP:%.*]])
99 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
101 svuint16_t test_svindex_u16(uint16_t base, uint16_t step) MODE_ATTR
103 return svindex_u16(base, step);
106 // CHECK-LABEL: @test_svindex_u32(
107 // CHECK-NEXT: entry:
108 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 [[BASE:%.*]], i32 [[STEP:%.*]])
109 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
111 // CPP-CHECK-LABEL: @_Z16test_svindex_u32jj(
112 // CPP-CHECK-NEXT: entry:
113 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 [[BASE:%.*]], i32 [[STEP:%.*]])
114 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
116 svuint32_t test_svindex_u32(uint32_t base, uint32_t step) MODE_ATTR
118 return svindex_u32(base, step);
121 // CHECK-LABEL: @test_svindex_u64(
122 // CHECK-NEXT: entry:
123 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 [[BASE:%.*]], i64 [[STEP:%.*]])
124 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
126 // CPP-CHECK-LABEL: @_Z16test_svindex_u64mm(
127 // CPP-CHECK-NEXT: entry:
128 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 [[BASE:%.*]], i64 [[STEP:%.*]])
129 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
131 svuint64_t test_svindex_u64(uint64_t base, uint64_t step) MODE_ATTR
133 return svindex_u64(base, step);