# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s
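#
# Check that vector right shifts by an in-range constant splat are lowered to
# G_VASHR/G_VLSHR, which take the shift amount as an immediate. Out-of-range,
# zero, and non-splat shift amounts should not be lowered.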

---
name:            ashr_v4s32
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: ashr_v4s32
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK: [[VASHR:%[0-9]+]]:_(<4 x s32>) = G_VASHR [[COPY]], [[C]](s32)
    ; CHECK: $q0 = COPY [[VASHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 5
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_ASHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            lshr_v4s32
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: lshr_v4s32
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK: [[VLSHR:%[0-9]+]]:_(<4 x s32>) = G_VLSHR [[COPY]], [[C]](s32)
    ; CHECK: $q0 = COPY [[VLSHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 5
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_LSHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            lshr_v8s16
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: lshr_v8s16
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK: [[VLSHR:%[0-9]+]]:_(<8 x s16>) = G_VLSHR [[COPY]], [[C]](s32)
    ; CHECK: $q0 = COPY [[VLSHR]](<8 x s16>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<8 x s16>) = COPY $q0
    %1:_(s16) = G_CONSTANT i16 5
    %2:_(<8 x s16>) = G_BUILD_VECTOR %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16)
    %3:_(<8 x s16>) = G_LSHR %0, %2(<8 x s16>)
    $q0 = COPY %3(<8 x s16>)
    RET_ReallyLR implicit $q0

...
---
name:            imm_too_large
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $d1

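    ; The splatted shift amount (40) is larger than the 32-bit element size,
    ; so this should not be lowered to G_VLSHR.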
    ; CHECK-LABEL: name: imm_too_large
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
    ; CHECK: [[DUP:%[0-9]+]]:_(<4 x s32>) = G_DUP [[C]](s32)
    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[DUP]](<4 x s32>)
    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 40
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_LSHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            imm_zero
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $d1

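    ; A shift amount of 0 is outside the valid immediate range, so this should
    ; not be lowered to G_VLSHR.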
    ; CHECK-LABEL: name: imm_zero
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_LSHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            imm_not_splat
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $d1

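    ; The shift amount is not a splat, so there is no single immediate and this
    ; should not be lowered to G_VLSHR.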
    ; CHECK-LABEL: name: imm_not_splat
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C]](s32), [[C]](s32)
    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 4
    %4:_(s32) = G_CONSTANT i32 6
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %4(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_LSHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0

...