# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
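#
# These tests check how the AArch64 pre-legalizer combiner handles
# @llvm.memmove: a call with a small constant length is expanded into
# 16-byte-wide loads and stores (plus a narrower tail when the length is not
# a multiple of 16), while a call with a dynamic or too-large length is left
# as the intrinsic.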
--- |
  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
  target triple = "aarch64"

  define void @test_memmove1(i32* nocapture %dst, i32* nocapture readonly %src, i64 %len) local_unnamed_addr #0 {
  entry:
    %0 = bitcast i32* %dst to i8*
    %1 = bitcast i32* %src to i8*
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 %len, i1 false)
    ret void
  }

  declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1 immarg) #1

  define void @test_memmove2_const(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
  entry:
    %0 = bitcast i32* %dst to i8*
    %1 = bitcast i32* %src to i8*
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 48, i1 false)
    ret void
  }

  define void @test_memmove3_const_toolarge(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
  entry:
    %0 = bitcast i32* %dst to i8*
    %1 = bitcast i32* %src to i8*
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 96, i1 false)
    ret void
  }

  define void @test_memmove4_const_unaligned(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
  entry:
    %0 = bitcast i32* %dst to i8*
    %1 = bitcast i32* %src to i8*
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 52, i1 false)
    ret void
  }

  attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2,+zcm,+zcz" "unsafe-fp-math"="false" "use-soft-float"="false" }
  attributes #1 = { argmemonly nounwind }

...
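# The length of test_memmove1 is only known at runtime, so the combiner must
# leave the @llvm.memmove call untouched.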
---
name:            test_memmove1
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1, $x2

    ; CHECK-LABEL: name: test_memmove1
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = COPY $x2
    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR

...
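# A constant length of 48 bytes is small enough to inline: the memmove becomes
# three 16-byte load/store pairs.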
---
name:            test_memmove2_const
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memmove2_const
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.1 + 32, align 4)
    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C2]](s64)
    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP2]](p0) :: (store 16 into %ir.0 + 16, align 4)
    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C3]](s64)
    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 48
    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR

...
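# A constant length of 96 bytes is above what the combiner will inline here,
# so the intrinsic call is kept.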
---
name:            test_memmove3_const_toolarge
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memmove3_const_toolarge
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 96
    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR

...
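# 52 bytes is a constant but not a multiple of 16: the expansion uses three
# 16-byte chunks followed by a 4-byte load/store for the tail.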
---
name:            test_memmove4_const_unaligned
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memmove4_const_unaligned
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.1 + 32, align 4)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C2]](s64)
    ; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.1 + 48)
    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C3]](s64)
    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 16, align 4)
    ; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C4]](s64)
    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP4]](p0) :: (store 16 into %ir.0 + 32, align 4)
    ; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
    ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C5]](s64)
    ; CHECK: G_STORE [[LOAD3]](s32), [[GEP5]](p0) :: (store 4 into %ir.0 + 48)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 52
    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR

...