// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -emit-llvm -target-feature +v \
// RUN:   -o - %s | FileCheck %s
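
// This test exercises the clang_builtin_alias attribute: a wrapper declared
// with it binds directly to __builtin_rvv_vadd_vv, so a call to the wrapper
// should lower to the @llvm.riscv.vadd intrinsic checked below.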
#include <riscv_vector.h>

#define __rvv_generic \
static inline __attribute__((__always_inline__, __nodebug__))
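
// The macro bundles the specifiers for an always-inlined, debug-info-free
// wrapper: always_inline ensures the aliased builtin call is folded into
// callers even at -O0, and nodebug keeps it out of debug info.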

__rvv_generic
__attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv)))
vint8m1_t vadd_generic (vint8m1_t op0, vint8m1_t op1, size_t op2);

// CHECK-LABEL: @test(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OP0_ADDR:%.*]] = alloca <vscale x 8 x i8>, align 1
// CHECK-NEXT:    [[OP1_ADDR:%.*]] = alloca <vscale x 8 x i8>, align 1
// CHECK-NEXT:    [[VL_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[RET:%.*]] = alloca <vscale x 8 x i8>, align 1
// CHECK-NEXT:    store <vscale x 8 x i8> [[OP0:%.*]], ptr [[OP0_ADDR]], align 1
// CHECK-NEXT:    store <vscale x 8 x i8> [[OP1:%.*]], ptr [[OP1_ADDR]], align 1
// CHECK-NEXT:    store i64 [[VL:%.*]], ptr [[VL_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i8>, ptr [[OP0_ADDR]], align 1
// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i8>, ptr [[OP1_ADDR]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[VL_ADDR]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[TMP0]], <vscale x 8 x i8> [[TMP1]], i64 [[TMP2]])
// CHECK-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[RET]], align 1
// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 8 x i8>, ptr [[RET]], align 1
// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP4]]
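//
// test() calls the generic wrapper; the assertions above verify it collapses
// to a single unmasked @llvm.riscv.vadd.nxv8i8.nxv8i8.i64 call with a poison
// passthru operand, with no residual call to vadd_generic.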
vint8m1_t test(vint8m1_t op0, vint8m1_t op1, size_t vl) {
  vint8m1_t ret = vadd_generic(op0, op1, vl);
  return ret;
}