1 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -loop-reduce %s | FileCheck %s
3 ; Test for assert resulting from inconsistent isLegalAddressingMode
4 ; answers when the address space was dropped from the query.
6 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
8 %0 = type { i32, double, i32, float }
10 ; CHECK-LABEL: @lsr_crash_preserve_addrspace_unknown_type(
11 ; CHECK: %tmp4 = bitcast %0 addrspace(3)* %tmp to double addrspace(3)*
12 ; CHECK: %scevgep5 = getelementptr double, double addrspace(3)* %tmp4, i32 1
13 ; CHECK: load double, double addrspace(3)* %scevgep5
15 ; CHECK: %scevgep = getelementptr i32, i32 addrspace(3)* %tmp1, i32 4
; CHECK: %tmp14 = load i32, i32 addrspace(3)* %scevgep
17 define amdgpu_kernel void @lsr_crash_preserve_addrspace_unknown_type() #0 {
21 bb1: ; preds = %bb17, %bb
22 %tmp = phi %0 addrspace(3)* [ undef, %bb ], [ %tmp18, %bb17 ]
23 %tmp2 = getelementptr inbounds %0, %0 addrspace(3)* %tmp, i64 0, i32 1
24 %tmp3 = load double, double addrspace(3)* %tmp2, align 8
28 br i1 undef, label %bb8, label %bb5
34 %tmp9 = getelementptr inbounds %0, %0 addrspace(3)* %tmp, i64 0, i32 0
35 %tmp10 = load i32, i32 addrspace(3)* %tmp9, align 4
36 %tmp11 = icmp eq i32 0, %tmp10
37 br i1 %tmp11, label %bb12, label %bb17
40 %tmp13 = getelementptr inbounds %0, %0 addrspace(3)* %tmp, i64 0, i32 2
41 %tmp14 = load i32, i32 addrspace(3)* %tmp13, align 4
42 %tmp15 = icmp eq i32 0, %tmp14
43 br i1 %tmp15, label %bb16, label %bb17
48 bb17: ; preds = %bb12, %bb8
49 %tmp18 = getelementptr inbounds %0, %0 addrspace(3)* %tmp, i64 2
53 ; CHECK-LABEL: @lsr_crash_preserve_addrspace_unknown_type2(
54 ; CHECK: %scevgep3 = getelementptr i8, i8 addrspace(5)* %array, i32 %j
55 ; CHECK: %scevgep2 = getelementptr i8, i8 addrspace(5)* %array, i32 %j
56 ; CHECK: %n8 = load i8, i8 addrspace(5)* %scevgep2, align 4
57 ; CHECK: call void @llvm.memcpy.p5i8.p3i8.i64(i8 addrspace(5)* %scevgep3, i8 addrspace(3)* %scevgep4, i64 42, i1 false)
58 ; CHECK: call void @llvm.memmove.p5i8.p3i8.i64(i8 addrspace(5)* %scevgep3, i8 addrspace(3)* %scevgep4, i64 42, i1 false)
59 ; CHECK: call void @llvm.memset.p5i8.i64(i8 addrspace(5)* %scevgep3, i8 42, i64 42, i1 false)
60 define void @lsr_crash_preserve_addrspace_unknown_type2(i8 addrspace(5)* %array, i8 addrspace(3)* %array2) {
64 for.body: ; preds = %entry, %for.inc
65 %j = phi i32 [ %add, %for.inc ], [ 0, %entry ]
66 %idx = getelementptr inbounds i8, i8 addrspace(5)* %array, i32 %j
67 %idx1 = getelementptr inbounds i8, i8 addrspace(3)* %array2, i32 %j
68 %t = getelementptr inbounds i8, i8 addrspace(5)* %array, i32 %j
69 %n8 = load i8, i8 addrspace(5)* %t, align 4
70 %n7 = getelementptr inbounds i8, i8 addrspace(5)* %t, i32 42
71 %n9 = load i8, i8 addrspace(5)* %n7, align 4
72 %cmp = icmp sgt i32 %j, 42
73 %add = add nuw nsw i32 %j, 1
74 br i1 %cmp, label %if.then17, label %for.inc
if.then17:                                        ; preds = %for.body
  ; Overloaded-intrinsic names must encode the pointer address spaces of the
  ; arguments: destination %idx is p5 (private), source %idx1 is p3 (local),
  ; so the mangled suffix is .p5i8.p3i8. The previous .p5i8.p5i8. names did
  ; not match the i8 addrspace(3)* source operand (and contradicted the
  ; CHECK lines above, which expect @llvm.memcpy.p5i8.p3i8.i64 etc.).
  call void @llvm.memcpy.p5i8.p3i8.i64(i8 addrspace(5)* %idx, i8 addrspace(3)* %idx1, i64 42, i1 false)
  call void @llvm.memmove.p5i8.p3i8.i64(i8 addrspace(5)* %idx, i8 addrspace(3)* %idx1, i64 42, i1 false)
  call void @llvm.memset.p5i8.i64(i8 addrspace(5)* %idx, i8 42, i64 42, i1 false)
82 for.inc: ; preds = %for.body, %if.then17
83 %exitcond = icmp eq i1 %cmp, 1
84 br i1 %exitcond, label %end, label %for.body
86 end: ; preds = %for.inc
; Intrinsic declarations. For overloaded memory intrinsics the name suffix
; must match the parameter address spaces: the destination is
; i8 addrspace(5)* (p5) and the memcpy/memmove source is i8 addrspace(3)*
; (p3), giving .p5i8.p3i8.i64. The original .p5i8.p5i8. names disagreed with
; the addrspace(3) second parameter (mis-mangled intrinsics are rejected by
; the IR verifier) and with the CHECK lines, which expect p5i8.p3i8.
declare void @llvm.memcpy.p5i8.p3i8.i64(i8 addrspace(5)*, i8 addrspace(3)*, i64, i1)
declare void @llvm.memmove.p5i8.p3i8.i64(i8 addrspace(5)*, i8 addrspace(3)*, i64, i1)
declare void @llvm.memset.p5i8.i64(i8 addrspace(5)*, i8, i64, i1)
94 attributes #0 = { nounwind }
95 attributes #1 = { nounwind readnone }