; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -simplify-mir -global-isel -mtriple=amdgcn -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s

; Check the flags set on the memory operands for loads determined to
; be constants by alias analysis.

@const_gv0 = external addrspace(1) constant i32, align 4
@const_gv1 = external addrspace(1) constant i32, align 4
@const_struct_gv = external addrspace(1) constant { i32, i64 }, align 8
define i32 @load_const_i32_gv() {
  ; CHECK-LABEL: name: load_const_i32_gv
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: [[GV:%[0-9]+]]:_(p1) = G_GLOBAL_VALUE @const_gv0
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p1) :: (dereferenceable invariant load (s32) from @const_gv0, addrspace 1)
  ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0
  %load = load i32, ptr addrspace(1) @const_gv0, align 4
  ret i32 %load
}
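
; The select picks between two constant globals, so the translated load
; should still carry the dereferenceable invariant flags.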
define i32 @load_select_const_i32_gv(i1 %cond) {
  ; CHECK-LABEL: name: load_select_const_i32_gv
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
  ; CHECK-NEXT: [[GV:%[0-9]+]]:_(p1) = G_GLOBAL_VALUE @const_gv0
  ; CHECK-NEXT: [[GV1:%[0-9]+]]:_(p1) = G_GLOBAL_VALUE @const_gv1
  ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(p1) = G_SELECT [[TRUNC]](s1), [[GV]], [[GV1]]
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[SELECT]](p1) :: (dereferenceable invariant load (s32) from %ir.select, addrspace 1)
  ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0
  %select = select i1 %cond, ptr addrspace(1) @const_gv0, ptr addrspace(1) @const_gv1
  %load = load i32, ptr addrspace(1) %select, align 4
  ret i32 %load
}
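
; The struct load is broken into one G_LOAD per member; each resulting
; memory operand should keep the invariant flags.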
define { i32, i64 } @load_const_struct_gv() {
  ; CHECK-LABEL: name: load_const_struct_gv
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: [[GV:%[0-9]+]]:_(p1) = G_GLOBAL_VALUE @const_struct_gv
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p1) :: (dereferenceable invariant load (s32) from @const_struct_gv, align 8, addrspace 1)
  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
  ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[GV]], [[C]](s64)
  ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (dereferenceable invariant load (s64) from @const_struct_gv + 8, addrspace 1)
  ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](s64)
  ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK-NEXT: $vgpr1 = COPY [[UV]](s32)
  ; CHECK-NEXT: $vgpr2 = COPY [[UV1]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %load = load { i32, i64 }, ptr addrspace(1) @const_struct_gv, align 8
  ret { i32, i64 } %load
}
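
; For the memory intrinsics below, the source pointer is in the constant
; address space (4), so the load side of the memory operands should be
; marked dereferenceable invariant.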
define void @test_memcpy_p1_constaddr_i64(ptr addrspace(1) %dst, ptr addrspace(4) %src) {
  ; CHECK-LABEL: name: test_memcpy_p1_constaddr_i64
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
  ; CHECK-NEXT: G_MEMCPY [[MV]](p1), [[MV1]](p4), [[C]](s64), 0 :: (store (s8) into %ir.dst, addrspace 1), (dereferenceable invariant load (s8) from %ir.src, addrspace 4)
  ; CHECK-NEXT: SI_RETURN
  call void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) %dst, ptr addrspace(4) %src, i64 32, i1 false)
  ret void
}
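
; Same check for the llvm.memcpy.inline variant.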
define void @test_memcpy_inline_p1_constaddr_i64(ptr addrspace(1) %dst, ptr addrspace(4) %src) {
  ; CHECK-LABEL: name: test_memcpy_inline_p1_constaddr_i64
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
  ; CHECK-NEXT: G_MEMCPY_INLINE [[MV]](p1), [[MV1]](p4), [[C]](s64) :: (store (s8) into %ir.dst, addrspace 1), (dereferenceable invariant load (s8) from %ir.src, addrspace 4)
  ; CHECK-NEXT: SI_RETURN
  call void @llvm.memcpy.inline.p1.p4.i64(ptr addrspace(1) %dst, ptr addrspace(4) %src, i64 32, i1 false)
  ret void
}
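
; Same check for llvm.memmove.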
define void @test_memmove_p1_constaddr_i64(ptr addrspace(1) %dst, ptr addrspace(4) %src) {
  ; CHECK-LABEL: name: test_memmove_p1_constaddr_i64
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
  ; CHECK-NEXT: G_MEMMOVE [[MV]](p1), [[MV1]](p4), [[C]](s64), 0 :: (store (s8) into %ir.dst, addrspace 1), (dereferenceable invariant load (s8) from %ir.src, addrspace 4)
  ; CHECK-NEXT: SI_RETURN
  call void @llvm.memmove.p1.p4.i64(ptr addrspace(1) %dst, ptr addrspace(4) %src, i64 32, i1 false)
  ret void
}

declare void @llvm.memcpy.p1.p4.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg) #0
declare void @llvm.memcpy.inline.p1.p4.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg) #0
declare void @llvm.memmove.p1.p4.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(4) nocapture readonly, i64, i1 immarg) #0

attributes #0 = { argmemonly nofree nounwind willreturn }