# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
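
# Each function feeds a vector through G_INTRINSIC @llvm.aarch64.neon.uaddlv
# and returns the scalar sum. Selection should pick the UADDLV variant for the
# element type, then extract the scalar through an INSERT_SUBREG + subregister
# copy. For <8 x s8>, the 16-bit sum lands in an fpr16, is inserted at hsub,
# and the s32 result is read back from ssub.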
---
name:            uaddlv_v8s8
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0
    ; CHECK-LABEL: name: uaddlv_v8s8
    ; CHECK: %copy:fpr64 = COPY $d0
    ; CHECK: [[UADDLVv8i8v:%[0-9]+]]:fpr16 = UADDLVv8i8v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv8i8v]], %subreg.hsub
    ; CHECK: %intrin:fpr32 = COPY [[INSERT_SUBREG]].ssub
    ; CHECK: $w0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:fpr(<8 x s8>) = COPY $d0
    %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<8 x s8>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0
...
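
# Same selection for the full 128-bit <16 x s8> source: UADDLVv16i8v also
# produces an fpr16 sum, inserted at hsub and copied out of ssub as an s32.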
---
name:            uaddlv_v16s8
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0
    ; CHECK-LABEL: name: uaddlv_v16s8
    ; CHECK: %copy:fpr128 = COPY $q0
    ; CHECK: [[UADDLVv16i8v:%[0-9]+]]:fpr16 = UADDLVv16i8v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv16i8v]], %subreg.hsub
    ; CHECK: %intrin:fpr32 = COPY [[INSERT_SUBREG]].ssub
    ; CHECK: $w0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:fpr(<16 x s8>) = COPY $q0
    %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<16 x s8>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0
...
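
# With 16-bit elements the sum widens to 32 bits: UADDLVv4i16v defines an
# fpr32, so the insert goes to ssub and the final copy reads ssub directly.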
---
name:            uaddlv_v4s16
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0
    ; CHECK-LABEL: name: uaddlv_v4s16
    ; CHECK: %copy:fpr64 = COPY $d0
    ; CHECK: [[UADDLVv4i16v:%[0-9]+]]:fpr32 = UADDLVv4i16v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv4i16v]], %subreg.ssub
    ; CHECK: %intrin:fpr32 = COPY [[INSERT_SUBREG]].ssub
    ; CHECK: $w0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:fpr(<4 x s16>) = COPY $d0
    %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<4 x s16>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0
...
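
# The 128-bit <8 x s16> case selects UADDLVv8i16v; otherwise identical to the
# <4 x s16> pattern above.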
---
name:            uaddlv_v8s16
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0
    ; CHECK-LABEL: name: uaddlv_v8s16
    ; CHECK: %copy:fpr128 = COPY $q0
    ; CHECK: [[UADDLVv8i16v:%[0-9]+]]:fpr32 = UADDLVv8i16v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv8i16v]], %subreg.ssub
    ; CHECK: %intrin:fpr32 = COPY [[INSERT_SUBREG]].ssub
    ; CHECK: $w0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:fpr(<8 x s16>) = COPY $q0
    %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<8 x s16>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0
...
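
# 32-bit elements widen to a 64-bit sum: UADDLVv4i32v defines an fpr64,
# inserted at dsub, and the s64 result is returned in $x0.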
---
name:            uaddlv_v4s32
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0
    ; CHECK-LABEL: name: uaddlv_v4s32
    ; CHECK: %copy:fpr128 = COPY $q0
    ; CHECK: [[UADDLVv4i32v:%[0-9]+]]:fpr64 = UADDLVv4i32v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv4i32v]], %subreg.dsub
    ; CHECK: %intrin:fpr64 = COPY [[INSERT_SUBREG]].dsub
    ; CHECK: $x0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $x0
    %copy:fpr(<4 x s32>) = COPY $q0
    %intrin:fpr(s64) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<4 x s32>)
    $x0 = COPY %intrin(s64)
    RET_ReallyLR implicit $x0
...