1 ; RUN: llc -mtriple=x86_64-linux-gnu -stop-after=finalize-isel -o - %s | FileCheck --check-prefix=MIR %s
3 ; Ensure that the scoped AA is attached on loads/stores lowered from mem ops.
5 ; The scope slot numbers are matched with regexes below because the metadata numbering may change from run to run.
7 ; MIR-DAG: ![[DOMAIN:[0-9]+]] = distinct !{!{{[0-9]+}}, !"bax"}
8 ; MIR-DAG: ![[SCOPE0:[0-9]+]] = distinct !{!{{[0-9]+}}, ![[DOMAIN]], !"bax: %p"}
9 ; MIR-DAG: ![[SCOPE1:[0-9]+]] = distinct !{!{{[0-9]+}}, ![[DOMAIN]], !"bax: %q"}
10 ; MIR-DAG: ![[SET0:[0-9]+]] = !{![[SCOPE0]]}
11 ; MIR-DAG: ![[SET1:[0-9]+]] = !{![[SCOPE1]]}
13 ; MIR-LABEL: name: test_memcpy
14 ; MIR: %2:gr64 = MOV64rm %0, 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
15 ; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
16 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
17 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
; @llvm.memcpy of 16 bytes from %p1 (= %p + 16 bytes) down to %p0 (= %p), tagged
; with !alias.scope !2 / !noalias !4; the MIR checks above verify that the
; lowered 8-byte loads/stores carry the same scoped-AA metadata.
18 define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
19 %p0 = bitcast i32* %p to i8*                      ; dest: (i8*)%p
20 %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
21 %p1 = bitcast i32* %add.ptr to i8*                ; src: (i8*)(%p + 4 x i32)
22 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
23 %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
24 %q1 = getelementptr inbounds i32, i32* %q, i64 1
25 %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
26 %add = add i32 %v0, %v1
; NOTE(review): the `ret i32 %add` and closing `}` fall outside this excerpt
; (original lines 27-28 are not visible here).
30 ; MIR-LABEL: name: test_memcpy_inline
31 ; MIR: %2:gr64 = MOV64rm %0, 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
32 ; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
33 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
34 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
; Same shape as @test_memcpy but using the @llvm.memcpy.inline intrinsic; the
; checks above expect identical MIR with the scoped-AA metadata preserved.
35 define i32 @test_memcpy_inline(i32* nocapture %p, i32* nocapture readonly %q) {
36 %p0 = bitcast i32* %p to i8*                      ; dest: (i8*)%p
37 %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
38 %p1 = bitcast i32* %add.ptr to i8*                ; src: (i8*)(%p + 16 bytes)
39 tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
40 %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
41 %q1 = getelementptr inbounds i32, i32* %q, i64 1
42 %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
43 %add = add i32 %v0, %v1
; NOTE(review): `ret i32 %add` and the closing `}` are outside this excerpt.
47 ; MIR-LABEL: name: test_memmove
48 ; MIR: %2:gr64 = MOV64rm %0, 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
49 ; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
50 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
51 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
; @llvm.memmove variant; note the MIR checks above emit the two stores in the
; opposite order from the memcpy cases (both loads complete before any store,
; as required for overlapping copies), still carrying the scoped-AA metadata.
52 define i32 @test_memmove(i32* nocapture %p, i32* nocapture readonly %q) {
53 %p0 = bitcast i32* %p to i8*                      ; dest: (i8*)%p
54 %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
55 %p1 = bitcast i32* %add.ptr to i8*                ; src: (i8*)(%p + 16 bytes)
56 tail call void @llvm.memmove.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
57 %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
58 %q1 = getelementptr inbounds i32, i32* %q, i64 1
59 %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
60 %add = add i32 %v0, %v1
; NOTE(review): `ret i32 %add` and the closing `}` are outside this excerpt.
64 ; MIR-LABEL: name: test_memset
65 ; MIR: %2:gr64 = MOV64ri -6148914691236517206
66 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, %2 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
67 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
; @llvm.memset of 16 bytes with 0xAA; the checks above expect a single
; MOV64ri of 0xAAAAAAAAAAAAAAAA (= -6148914691236517206) stored twice, with
; the scoped-AA metadata preserved on both stores.
68 define i32 @test_memset(i32* nocapture %p, i32* nocapture readonly %q) {
69 %p0 = bitcast i32* %p to i8*                      ; dest: (i8*)%p
70 tail call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8 170, i64 16, i1 false), !alias.scope !2, !noalias !4
71 %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
72 %q1 = getelementptr inbounds i32, i32* %q, i64 1
73 %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
74 %add = add i32 %v0, %v1
; NOTE(review): `ret i32 %add` and the closing `}` are outside this excerpt.
78 ; MIR-LABEL: name: test_mempcpy
79 ; MIR: %2:gr64 = MOV64rm %0, 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
80 ; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
81 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
82 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
; Calls the libc @mempcpy function (not an intrinsic); the backend recognizes
; it and lowers it like a memcpy. Note the MIR checks above use align 1 here
; — presumably because the libcall recognition does not propagate the align 4
; from the call-site attributes; TODO confirm against the lowering code.
83 define i32 @test_mempcpy(i32* nocapture %p, i32* nocapture readonly %q) {
84 %p0 = bitcast i32* %p to i8*                      ; dest: (i8*)%p
85 %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
86 %p1 = bitcast i32* %add.ptr to i8*                ; src: (i8*)(%p + 16 bytes)
87 %call = tail call i8* @mempcpy(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !2, !noalias !4
88 %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
89 %q1 = getelementptr inbounds i32, i32* %q, i64 1
90 %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
91 %add = add i32 %v0, %v1
; NOTE(review): `ret i32 %add` and the closing `}` are outside this excerpt.
; Declarations of the memory intrinsics and the libc mempcpy used above.
95 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
96 declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
97 declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg)
98 declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
100 declare i8* @mempcpy(i8*, i8*, i64)   ; libc mempcpy (external function, not an intrinsic)
; Scoped-AA metadata: one domain ("bax") with one scope per pointer.
102 !0 = distinct !{!0, !"bax"}           ; self-referential AA domain
103 !1 = distinct !{!1, !0, !"bax: %p"}   ; scope covering accesses through %p
; NOTE(review): the scope lists !2 and !4 referenced by the functions above are
; not visible in this excerpt (original lines 104/106); presumably
; !2 = !{!1} and !4 = !{!3} — confirm against the full file.
105 !3 = distinct !{!3, !0, !"bax: %q"}   ; scope covering accesses through %q