// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// REQUIRES: aarch64-registered-target

#include <arm_sve.h>
#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
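
// For example, SVE_ACLE_FUNC(svget4,_bf16,,) as used below expands to the
// overloaded name svget4 when SVE_OVERLOADED_FORMS is defined, and to the
// explicit name svget4_bf16 otherwise.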
// CHECK-LABEL: @test_svget4_bf16_0(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[TUPLE:%.*]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z18test_svget4_bf16_014svbfloat16x4_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[TUPLE:%.*]], i64 0)
// CPP-CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
//
svbfloat16_t test_svget4_bf16_0(svbfloat16x4_t tuple)
{
  return SVE_ACLE_FUNC(svget4,_bf16,,)(tuple, 0);
}
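
// Each svbfloat16_t element of the tuple is <vscale x 8 x bfloat>, so getting
// element i lowers to @llvm.vector.extract at lane offset i * 8; the remaining
// tests below check offsets 8, 16 and 24.
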
// CHECK-LABEL: @test_svget4_bf16_1(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[TUPLE:%.*]], i64 8)
// CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z18test_svget4_bf16_114svbfloat16x4_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[TUPLE:%.*]], i64 8)
// CPP-CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
//
svbfloat16_t test_svget4_bf16_1(svbfloat16x4_t tuple)
{
  return SVE_ACLE_FUNC(svget4,_bf16,,)(tuple, 1);
}

// CHECK-LABEL: @test_svget4_bf16_2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[TUPLE:%.*]], i64 16)
// CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z18test_svget4_bf16_214svbfloat16x4_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[TUPLE:%.*]], i64 16)
// CPP-CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
//
svbfloat16_t test_svget4_bf16_2(svbfloat16x4_t tuple)
{
  return SVE_ACLE_FUNC(svget4,_bf16,,)(tuple, 2);
}

// CHECK-LABEL: @test_svget4_bf16_3(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[TUPLE:%.*]], i64 24)
// CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z18test_svget4_bf16_314svbfloat16x4_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[TUPLE:%.*]], i64 24)
// CPP-CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
//
svbfloat16_t test_svget4_bf16_3(svbfloat16x4_t tuple)
{
  return SVE_ACLE_FUNC(svget4,_bf16,,)(tuple, 3);
}