; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX9
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX12
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX9
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes=amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX12

; Make sure we don't crash when trying to create a bitcast between
; address spaces
define amdgpu_kernel void @constant_from_offset_cast_generic_null() {
; GFX9-LABEL: @constant_from_offset_cast_generic_null(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_generic_null(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}
define amdgpu_kernel void @constant_from_offset_cast_global_null() {
; GFX9-LABEL: @constant_from_offset_cast_global_null(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_global_null(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}
@gv = unnamed_addr addrspace(1) global [64 x i8] undef, align 4

define amdgpu_kernel void @constant_from_offset_cast_global_gv() {
; GFX9-LABEL: @constant_from_offset_cast_global_gv(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_global_gv(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}
define amdgpu_kernel void @constant_from_offset_cast_generic_inttoptr() {
; GFX9-LABEL: @constant_from_offset_cast_generic_inttoptr(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_generic_inttoptr(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}
define amdgpu_kernel void @constant_from_inttoptr() {
; GFX9-LABEL: @constant_from_inttoptr(
; GFX9-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 4
; GFX9-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_inttoptr(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}
98 define void @broken_phi() {
99 ; GFX9-LABEL: @broken_phi(
101 ; GFX9-NEXT: br label [[BB1:%.*]]
103 ; GFX9-NEXT: [[I:%.*]] = phi <4 x i8> [ splat (i8 1), [[BB:%.*]] ], [ [[I8:%.*]], [[BB7:%.*]] ]
104 ; GFX9-NEXT: br i1 false, label [[BB3:%.*]], label [[BB2:%.*]]
106 ; GFX9-NEXT: br label [[BB3]]
108 ; GFX9-NEXT: [[I4:%.*]] = phi <4 x i8> [ zeroinitializer, [[BB2]] ], [ [[I]], [[BB1]] ]
109 ; GFX9-NEXT: br i1 false, label [[BB7]], label [[BB5:%.*]]
111 ; GFX9-NEXT: [[I6:%.*]] = call <4 x i8> @llvm.smax.v4i8(<4 x i8> [[I4]], <4 x i8> zeroinitializer)
112 ; GFX9-NEXT: br label [[BB7]]
114 ; GFX9-NEXT: [[I8]] = phi <4 x i8> [ zeroinitializer, [[BB5]] ], [ zeroinitializer, [[BB3]] ]
115 ; GFX9-NEXT: br label [[BB1]]
117 ; GFX12-LABEL: @broken_phi(
119 ; GFX12-NEXT: br label [[BB1:%.*]]
121 ; GFX12-NEXT: [[I:%.*]] = phi <4 x i8> [ splat (i8 1), [[BB:%.*]] ], [ [[I8:%.*]], [[BB7:%.*]] ]
122 ; GFX12-NEXT: br i1 false, label [[BB3:%.*]], label [[BB2:%.*]]
124 ; GFX12-NEXT: br label [[BB3]]
126 ; GFX12-NEXT: [[I4:%.*]] = phi <4 x i8> [ zeroinitializer, [[BB2]] ], [ [[I]], [[BB1]] ]
127 ; GFX12-NEXT: br i1 false, label [[BB7]], label [[BB5:%.*]]
129 ; GFX12-NEXT: [[I6:%.*]] = call <4 x i8> @llvm.smax.v4i8(<4 x i8> [[I4]], <4 x i8> zeroinitializer)
130 ; GFX12-NEXT: br label [[BB7]]
132 ; GFX12-NEXT: [[I8]] = phi <4 x i8> [ zeroinitializer, [[BB5]] ], [ zeroinitializer, [[BB3]] ]
133 ; GFX12-NEXT: br label [[BB1]]
138 %i = phi <4 x i8> [ <i8 1, i8 1, i8 1, i8 1>, %bb ], [ %i8, %bb7 ]
139 br i1 false, label %bb3, label %bb2
143 %i4 = phi <4 x i8> [ zeroinitializer, %bb2 ], [ %i, %bb1 ]
144 br i1 false, label %bb7, label %bb5
146 %i6 = call <4 x i8> @llvm.smax.v4i8(<4 x i8> %i4, <4 x i8> zeroinitializer)
149 %i8 = phi <4 x i8> [ zeroinitializer, %bb5 ], [ zeroinitializer, %bb3 ]