clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_zip1-fp64.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -fclang-abi-compat=latest -target-feature +f64mm -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -target-feature +f64mm -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -target-feature +f64mm -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -target-feature +f64mm -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
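// The four RUN lines above compile this file as C and as C++, each with and
// without SVE_OVERLOADED_FORMS, so both the suffixed and the overloaded
// intrinsic spellings are exercised (C output against CHECK, C++ output
// against CPP-CHECK).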

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif
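// For example, SVE_ACLE_FUNC(svzip1q, _s8, , ) expands to the suffixed name
// svzip1q_s8 here, and to the overloaded name svzip1q when
// SVE_OVERLOADED_FORMS is defined.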

// CHECK-LABEL: @test_svzip1_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.zip1q.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svzip1_s8u10__SVInt8_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.zip1q.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svzip1_s8(svint8_t op1, svint8_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _s8, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.zip1q.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svzip1_s16u11__SVInt16_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.zip1q.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svint16_t test_svzip1_s16(svint16_t op1, svint16_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _s16, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.zip1q.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svzip1_s32u11__SVInt32_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.zip1q.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svint32_t test_svzip1_s32(svint32_t op1, svint32_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _s32, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.zip1q.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svzip1_s64u11__SVInt64_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.zip1q.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svint64_t test_svzip1_s64(svint64_t op1, svint64_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _s64, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.zip1q.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svzip1_u8u11__SVUint8_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.zip1q.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svzip1_u8(svuint8_t op1, svuint8_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _u8, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.zip1q.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svzip1_u16u12__SVUint16_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.zip1q.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svuint16_t test_svzip1_u16(svuint16_t op1, svuint16_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _u16, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.zip1q.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svzip1_u32u12__SVUint32_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.zip1q.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svuint32_t test_svzip1_u32(svuint32_t op1, svuint32_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _u32, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.zip1q.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svzip1_u64u12__SVUint64_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.zip1q.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svuint64_t test_svzip1_u64(svuint64_t op1, svuint64_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _u64, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.zip1q.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svzip1_f16u13__SVFloat16_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.zip1q.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
svfloat16_t test_svzip1_f16(svfloat16_t op1, svfloat16_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _f16, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.zip1q.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svzip1_f32u13__SVFloat32_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.zip1q.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
svfloat32_t test_svzip1_f32(svfloat32_t op1, svfloat32_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _f32, , )(op1, op2);
}

// CHECK-LABEL: @test_svzip1_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.zip1q.nxv2f64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svzip1_f64u13__SVFloat64_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.zip1q.nxv2f64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
svfloat64_t test_svzip1_f64(svfloat64_t op1, svfloat64_t op2) {
  return SVE_ACLE_FUNC(svzip1q, _f64, , )(op1, op2);
}