// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -fclang-abi-compat=latest -target-feature +i8mm -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -target-feature +i8mm -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -target-feature +i8mm -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -target-feature +i8mm -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif
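
// For example, SVE_ACLE_FUNC(svmmla,_s32,,)(x, y, z) expands to the overloaded
// call svmmla(x, y, z) when SVE_OVERLOADED_FORMS is defined, and to the
// explicitly suffixed form svmmla_s32(x, y, z) otherwise.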

// CHECK-LABEL: @test_svmmla_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.smmla.nxv4i32(<vscale x 4 x i32> [[X:%.*]], <vscale x 16 x i8> [[Y:%.*]], <vscale x 16 x i8> [[Z:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svmmla_s32u11__SVInt32_tu10__SVInt8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.smmla.nxv4i32(<vscale x 4 x i32> [[X:%.*]], <vscale x 16 x i8> [[Y:%.*]], <vscale x 16 x i8> [[Z:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svint32_t test_svmmla_s32(svint32_t x, svint8_t y, svint8_t z) {
  return SVE_ACLE_FUNC(svmmla,_s32,,)(x, y, z);
}

// CHECK-LABEL: @test_svmmla_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ummla.nxv4i32(<vscale x 4 x i32> [[X:%.*]], <vscale x 16 x i8> [[Y:%.*]], <vscale x 16 x i8> [[Z:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svmmla_u32u12__SVUint32_tu11__SVUint8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ummla.nxv4i32(<vscale x 4 x i32> [[X:%.*]], <vscale x 16 x i8> [[Y:%.*]], <vscale x 16 x i8> [[Z:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svuint32_t test_svmmla_u32(svuint32_t x, svuint8_t y, svuint8_t z) {
  return SVE_ACLE_FUNC(svmmla,_u32,,)(x, y, z);
}

// CHECK-LABEL: @test_svusmmla_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.usmmla.nxv4i32(<vscale x 4 x i32> [[X:%.*]], <vscale x 16 x i8> [[Y:%.*]], <vscale x 16 x i8> [[Z:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svusmmla_s32u11__SVInt32_tu11__SVUint8_tu10__SVInt8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.usmmla.nxv4i32(<vscale x 4 x i32> [[X:%.*]], <vscale x 16 x i8> [[Y:%.*]], <vscale x 16 x i8> [[Z:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svint32_t test_svusmmla_s32(svint32_t x, svuint8_t y, svint8_t z) {
  return SVE_ACLE_FUNC(svusmmla,_s32,,)(x, y, z);
}