# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -global-isel-abort=1 -run-pass=instruction-select %s -o - | FileCheck %s

---
name:            uaddo_s32
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1, $x2
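
    ; Check that a 32-bit G_UADDO selects to a flag-setting ADDSWrr; the
    ; overflow bit is materialized by CSINC on condition code 3 (LO), which
    ; CSINC inverts, so the result is 1 exactly when the carry (HS) is set.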
    ; CHECK-LABEL: name: uaddo_s32
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK-NEXT: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $w0 = COPY [[ADDSWrr]]
    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
    ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %3:gpr(s32), %4:gpr(s32) = G_UADDO %0, %1
    $w0 = COPY %3(s32)
    $w1 = COPY %4(s32)
    RET_ReallyLR implicit $w0, implicit $w1

...
---
name:            uaddo_s64
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1, $x2
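
    ; Same selection as above, but at s64: ADDSXrr sets NZCV, and the
    ; overflow bit is still produced as a 32-bit CSINC.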
    ; CHECK-LABEL: name: uaddo_s64
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $x0 = COPY [[ADDSXrr]]
    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $w1
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %3:gpr(s64), %4:gpr(s32) = G_UADDO %0, %1
    $x0 = COPY %3(s64)
    $w1 = COPY %4(s32)
    RET_ReallyLR implicit $x0, implicit $w1

...
---
name:            uaddo_s32_imm
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1, $x2

    ; Check that we get ADDSWri when we can fold in a constant.
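    ; The %overflow def is unused, so only the flag-setting add is emitted
    ; and no CSINC appears.
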
    ; CHECK-LABEL: name: uaddo_s32_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:gpr32sp = COPY $w0
    ; CHECK-NEXT: %add:gpr32 = ADDSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK-NEXT: $w0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 16
    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy, %constant
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_s32_shifted
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1, $x2

    ; Check that we get ADDSWrs when we can fold in a shift.
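    ; The G_SHL of %copy2 by 16 becomes the shifted-register operand of
    ; ADDSWrs; the trailing immediate 16 encodes lsl #16.
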
    ; CHECK-LABEL: name: uaddo_s32_shifted
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy1:gpr32 = COPY $w0
    ; CHECK-NEXT: %copy2:gpr32 = COPY $w1
    ; CHECK-NEXT: %add:gpr32 = ADDSWrs %copy1, %copy2, 16, implicit-def $nzcv
    ; CHECK-NEXT: $w0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy1:gpr(s32) = COPY $w0
    %copy2:gpr(s32) = COPY $w1
    %constant:gpr(s32) = G_CONSTANT i32 16
    %shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy1, %shift
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_s32_neg_imm
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1, $x2

    ; Check that we get SUBSWri when we can fold in a negative constant.
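    ; Adding -16 and subtracting 16 produce bit-identical results and NZCV
    ; flags, so the unsigned-overflow semantics of G_UADDO are preserved.
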
    ; CHECK-LABEL: name: uaddo_s32_neg_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %copy:gpr32sp = COPY $w0
    ; CHECK-NEXT: %add:gpr32 = SUBSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK-NEXT: $w0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 -16
    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy, %constant
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_arith_extended
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $x0

    ; Check that we get ADDSXrx.
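    ; The G_ZEXT + G_SHL pair folds into the arithmetic-extended operand of
    ; ADDSXrx; the immediate 18 encodes uxtw #2.
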
    ; CHECK-LABEL: name: uaddo_arith_extended
    ; CHECK: liveins: $w0, $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
    ; CHECK-NEXT: %reg1:gpr32 = COPY $w0
    ; CHECK-NEXT: %add:gpr64 = ADDSXrx %reg0, %reg1, 18, implicit-def $nzcv
    ; CHECK-NEXT: $x0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %reg0:gpr(s64) = COPY $x0
    %reg1:gpr(s32) = COPY $w0
    %ext:gpr(s64) = G_ZEXT %reg1(s32)
    %cst:gpr(s64) = G_CONSTANT i64 2
    %shift:gpr(s64) = G_SHL %ext, %cst(s64)
    %add:gpr(s64), %flags:gpr(s32) = G_UADDO %reg0, %shift
    $x0 = COPY %add(s64)
    RET_ReallyLR implicit $x0

...