Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and mallo...
// llvm-project: clang/test/CodeGen/AArch64/sve-vls-subscript-ops.c

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN:   -disable-O0-optnone -mvscale-min=4 -mvscale-max=4 \
// RUN:   -emit-llvm -o - %s | opt -S -passes=sroa | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>
#include <stddef.h>
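
// Tests C subscripting ([]) of fixed-length (VLS) SVE vector values with a
// runtime index.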
#define N 512
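
// N is the fixed vector length in bits. The RUN lines pin vscale to 4
// (-mvscale-min=4 -mvscale-max=4); with 128-bit SVE granules that is
// 4 x 128 = 512 bits, the length that arm_sve_vector_bits(N) must match.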
typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
typedef svint16_t fixed_int16_t __attribute__((arm_sve_vector_bits(N)));
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(N)));

typedef svuint8_t fixed_uint8_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint16_t fixed_uint16_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint32_t fixed_uint32_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint64_t fixed_uint64_t __attribute__((arm_sve_vector_bits(N)));

typedef svfloat16_t fixed_float16_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat32_t fixed_float32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));

typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
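
// Each subscript_* function below indexes one of the fixed-length types with a
// runtime index. Per the autogenerated CHECK lines, the scalable argument is
// first narrowed to a fixed vector via llvm.vector.extract and the element is
// then read with extractelement.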

// CHECK-LABEL: @subscript_int16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i16 [[VECEXT]]
//
int16_t subscript_int16(fixed_int16_t a, size_t b) {
  return a[b];
}

// CHECK-LABEL: @subscript_uint16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i16 [[VECEXT]]
//
uint16_t subscript_uint16(fixed_uint16_t a, size_t b) {
  return a[b];
}

// CHECK-LABEL: @subscript_int32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i32 [[VECEXT]]
//
int32_t subscript_int32(fixed_int32_t a, size_t b) {
  return a[b];
}

// CHECK-LABEL: @subscript_uint32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i32 [[VECEXT]]
//
uint32_t subscript_uint32(fixed_uint32_t a, size_t b) {
  return a[b];
}

// CHECK-LABEL: @subscript_int64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i64 [[VECEXT]]
//
int64_t subscript_int64(fixed_int64_t a, size_t b) {
  return a[b];
}

// CHECK-LABEL: @subscript_uint64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i64 [[VECEXT]]
//
uint64_t subscript_uint64(fixed_uint64_t a, size_t b) {
  return a[b];
}

// CHECK-LABEL: @subscript_float16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x half> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret half [[VECEXT]]
//
__fp16 subscript_float16(fixed_float16_t a, size_t b) {
  return a[b];
}

// CHECK-LABEL: @subscript_float32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x float> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret float [[VECEXT]]
//
float subscript_float32(fixed_float32_t a, size_t b) {
  return a[b];
}

// CHECK-LABEL: @subscript_float64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x double> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret double [[VECEXT]]
//
double subscript_float64(fixed_float64_t a, size_t b) {
  return a[b];
}
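
// ---------------------------------------------------------------------------
// Not part of the test: a minimal usage sketch of VLS subscripting in ordinary
// code, assuming the driver flag -msve-vector-bits=512 (the user-facing
// counterpart of the cc1 vscale flags above). The names vec512f and sum_lanes
// are illustrative only; the block is disabled so it cannot affect the test.
#if 0
#include <arm_sve.h>

typedef svfloat32_t vec512f __attribute__((arm_sve_vector_bits(512)));

static float sum_lanes(vec512f v) {
  float s = 0.0f;
  // 512 bits / 32-bit lanes = 16 elements.
  for (int i = 0; i < 16; ++i)
    s += v[i]; // subscript on a fixed-length SVE vector, as exercised above
  return s;
}
#endif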