; RUN: llc < %s -mtriple aarch64-none-linux-gnu -mattr=+sve -stop-after=finalize-isel | FileCheck %s --check-prefix=CHECK

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-none-linux-gnu"
; Function Attrs: nounwind readnone
; The "y" constraint restricts the operand to the lower SVE registers,
; reflected below by the narrower zpr_3b register class on the copy.
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_3b = COPY [[ARG1]]
define <vscale x 16 x i8> @test_svadd_i8(<vscale x 16 x i8> %Zn, <vscale x 16 x i8> %Zm) {
  %1 = tail call <vscale x 16 x i8> asm "add $0.b, $1.b, $2.b", "=w,w,y"(<vscale x 16 x i8> %Zn, <vscale x 16 x i8> %Zm)
  ret <vscale x 16 x i8> %1
}
; Function Attrs: nounwind readnone
; The "x" constraint restricts the operand to a wider-but-still-limited SVE
; register set, reflected below by the zpr_4b register class on the copy.
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_4b = COPY [[ARG1]]
define <vscale x 2 x i64> @test_svsub_i64(<vscale x 2 x i64> %Zn, <vscale x 2 x i64> %Zm) {
  %1 = tail call <vscale x 2 x i64> asm "sub $0.d, $1.d, $2.d", "=w,w,x"(<vscale x 2 x i64> %Zn, <vscale x 2 x i64> %Zm)
  ret <vscale x 2 x i64> %1
}
; Function Attrs: nounwind readnone
; Same as test_svadd_i8 but for an FP half-element operation: the "y"
; constraint again yields the zpr_3b register class on the copy.
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_3b = COPY [[ARG1]]
define <vscale x 8 x half> @test_svfmul_f16(<vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm) {
  %1 = tail call <vscale x 8 x half> asm "fmul $0.h, $1.h, $2.h", "=w,w,y"(<vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm)
  ret <vscale x 8 x half> %1
}
; Function Attrs: nounwind readnone
; Same as test_svsub_i64 but for an FP single-element operation: the "x"
; constraint again yields the zpr_4b register class on the copy.
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_4b = COPY [[ARG1]]
define <vscale x 4 x float> @test_svfmul_f(<vscale x 4 x float> %Zn, <vscale x 4 x float> %Zm) {
  %1 = tail call <vscale x 4 x float> asm "fmul $0.s, $1.s, $2.s", "=w,w,x"(<vscale x 4 x float> %Zn, <vscale x 4 x float> %Zm)
  ret <vscale x 4 x float> %1
}