1 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 ; RUN: llc -global-isel -stop-after=irtranslator -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
3 ; This is a copy of sibling-call.ll, but stops after the IRTranslator.
5 define fastcc i32 @i32_fastcc_i32_i32(i32 %arg0, i32 %arg1) #1 {
6 ; GCN-LABEL: name: i32_fastcc_i32_i32
7 ; GCN: bb.1 (%ir-block.0):
8 ; GCN-NEXT: liveins: $vgpr0, $vgpr1
10 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
11 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
12 ; GCN-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
13 ; GCN-NEXT: $vgpr0 = COPY [[ADD]](s32)
14 ; GCN-NEXT: SI_RETURN implicit $vgpr0
15 %add0 = add i32 %arg0, %arg1
19 define fastcc i32 @i32_fastcc_i32_i32_stack_object(i32 %arg0, i32 %arg1) #1 {
20 ; GCN-LABEL: name: i32_fastcc_i32_i32_stack_object
21 ; GCN: bb.1 (%ir-block.0):
22 ; GCN-NEXT: liveins: $vgpr0, $vgpr1
24 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
25 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
26 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
27 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
28 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
29 ; GCN-NEXT: %4:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
30 ; GCN-NEXT: G_STORE [[C]](s32), %4(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
31 ; GCN-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
32 ; GCN-NEXT: $vgpr0 = COPY [[ADD]](s32)
33 ; GCN-NEXT: SI_RETURN implicit $vgpr0
34 %alloca = alloca [16 x i32], align 4, addrspace(5)
35 %gep = getelementptr inbounds [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 5
36 store volatile i32 9, ptr addrspace(5) %gep
37 %add0 = add i32 %arg0, %arg1
41 define hidden fastcc i32 @sibling_call_i32_fastcc_i32_i32(i32 %a, i32 %b, i32 %c) #1 {
42 ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32
44 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
46 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
47 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
48 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
49 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
50 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
51 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
52 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
53 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
54 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
56 %ret = tail call fastcc i32 @i32_fastcc_i32_i32(i32 %a, i32 %b)
60 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_stack_object(i32 %a, i32 %b, i32 %c) #1 {
61 ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_stack_object
63 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
65 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
66 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
67 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
68 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
69 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
70 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
71 ; GCN-NEXT: %5:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
72 ; GCN-NEXT: G_STORE [[C]](s32), %5(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
73 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
74 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
75 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
76 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
77 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
78 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
80 %alloca = alloca [16 x i32], align 4, addrspace(5)
81 %gep = getelementptr inbounds [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 5
82 store volatile i32 9, ptr addrspace(5) %gep
83 %ret = tail call fastcc i32 @i32_fastcc_i32_i32(i32 %a, i32 %b)
87 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_callee_stack_object(i32 %a, i32 %b, i32 %c) #1 {
88 ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_callee_stack_object
90 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
92 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
93 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
94 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
95 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
96 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
97 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
98 ; GCN-NEXT: %5:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
99 ; GCN-NEXT: G_STORE [[C]](s32), %5(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
100 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_stack_object
101 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
102 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
103 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
104 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
105 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_stack_object, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
107 %alloca = alloca [16 x i32], align 4, addrspace(5)
108 %gep = getelementptr inbounds [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 5
109 store volatile i32 9, ptr addrspace(5) %gep
110 %ret = tail call fastcc i32 @i32_fastcc_i32_i32_stack_object(i32 %a, i32 %b)
114 define fastcc void @sibling_call_i32_fastcc_i32_i32_unused_result(i32 %a, i32 %b, i32 %c) #1 {
115 ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_unused_result
117 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
119 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
120 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
121 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
122 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
123 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
124 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
125 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
126 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
127 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
129 %ret = tail call fastcc i32 @i32_fastcc_i32_i32(i32 %a, i32 %b)
133 ; It doesn't make sense to do a tail call from a kernel
134 define amdgpu_kernel void @kernel_call_i32_fastcc_i32_i32_unused_result(i32 %a, i32 %b, i32 %c) #1 {
135 ; GCN-LABEL: name: kernel_call_i32_fastcc_i32_i32_unused_result
137 ; GCN-NEXT: liveins: $sgpr4_sgpr5
139 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
140 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
141 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
142 ; GCN-NEXT: [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
143 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[INT]](p4) :: (dereferenceable invariant load (<2 x s32>) from %ir.a.kernarg.offset1, align 16, addrspace 4)
144 ; GCN-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[LOAD]](<2 x s32>), [[C]](s32)
145 ; GCN-NEXT: [[EVEC1:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[LOAD]](<2 x s32>), [[C1]](s32)
146 ; GCN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
147 ; GCN-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
148 ; GCN-NEXT: $vgpr0 = COPY [[EVEC]](s32)
149 ; GCN-NEXT: $vgpr1 = COPY [[EVEC1]](s32)
150 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
151 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
152 ; GCN-NEXT: $sgpr30_sgpr31 = noconvergent G_SI_CALL [[GV]](p0), @i32_fastcc_i32_i32, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
153 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr0
154 ; GCN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
155 ; GCN-NEXT: S_ENDPGM 0
157 %ret = tail call fastcc i32 @i32_fastcc_i32_i32(i32 %a, i32 %b)
161 define hidden fastcc i32 @i32_fastcc_i32_byval_i32(i32 %arg0, ptr addrspace(5) byval(i32) align 4 %arg1) #1 {
162 ; GCN-LABEL: name: i32_fastcc_i32_byval_i32
163 ; GCN: bb.1 (%ir-block.0):
164 ; GCN-NEXT: liveins: $vgpr0
166 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
167 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
168 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX]](p5)
169 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p5) :: (dereferenceable load (s32) from %ir.arg1, addrspace 5)
170 ; GCN-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[LOAD]]
171 ; GCN-NEXT: $vgpr0 = COPY [[ADD]](s32)
172 ; GCN-NEXT: SI_RETURN implicit $vgpr0
173 %arg1.load = load i32, ptr addrspace(5) %arg1, align 4
174 %add0 = add i32 %arg0, %arg1.load
178 ; Tail call disallowed with byval in parent.
179 define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32_byval_parent(i32 %a, ptr addrspace(5) byval(i32) %b.byval, i32 %c) #1 {
180 ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_byval_i32_byval_parent
182 ; GCN-NEXT: liveins: $vgpr0, $vgpr1
184 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
185 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
186 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX]](p5)
187 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr1
188 ; GCN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
189 ; GCN-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_byval_i32
190 ; GCN-NEXT: [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sgpr32
191 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
192 ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C]](s32)
193 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
194 ; GCN-NEXT: G_MEMCPY [[PTR_ADD]](p5), [[COPY1]](p5), [[C1]](s32), 0 :: (dereferenceable store (s32) into stack, addrspace 5), (dereferenceable load (s32) from %ir.b.byval, addrspace 5)
195 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
196 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
197 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
198 ; GCN-NEXT: $sgpr30_sgpr31 = noconvergent G_SI_CALL [[GV]](p0), @i32_fastcc_i32_byval_i32, csr_amdgpu, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
199 ; GCN-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr0
200 ; GCN-NEXT: ADJCALLSTACKDOWN 0, 4, implicit-def $scc
201 ; GCN-NEXT: $vgpr0 = COPY [[COPY4]](s32)
202 ; GCN-NEXT: SI_RETURN implicit $vgpr0
204 %ret = tail call fastcc i32 @i32_fastcc_i32_byval_i32(i32 %a, ptr addrspace(5) byval(i32) %b.byval)
208 ; Tail call disallowed with byval in parent, not callee. The stack
209 ; usage of incoming arguments must be <= the outgoing stack
211 define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32(i32 %a, [32 x i32] %large) #1 {
212 ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_byval_i32
214 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
216 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
217 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
218 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
219 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
220 ; GCN-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
221 ; GCN-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
222 ; GCN-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
223 ; GCN-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
224 ; GCN-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
225 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
226 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
227 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
228 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
229 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
230 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
231 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
232 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
233 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
234 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
235 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
236 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
237 ; GCN-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
238 ; GCN-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
239 ; GCN-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
240 ; GCN-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
241 ; GCN-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
242 ; GCN-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
243 ; GCN-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
244 ; GCN-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
245 ; GCN-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
246 ; GCN-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
247 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
248 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.2, align 16, addrspace 5)
249 ; GCN-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
250 ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.1, addrspace 5)
251 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
252 ; GCN-NEXT: [[INTTOPTR:%[0-9]+]]:_(p5) = G_INTTOPTR [[C]](s32)
253 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_byval_i32
254 ; GCN-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
255 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
256 ; GCN-NEXT: G_MEMCPY [[FRAME_INDEX2]](p5), [[INTTOPTR]](p5), [[C1]](s32), 0 :: (dereferenceable store (s32) into %fixed-stack.0, align 16, addrspace 5), (dereferenceable load (s32) from `ptr addrspace(5) inttoptr (i32 16 to ptr addrspace(5))`, align 16, addrspace 5)
257 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
258 ; GCN-NEXT: [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
259 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
260 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_byval_i32, 0, csr_amdgpu, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3
262 %ret = tail call fastcc i32 @i32_fastcc_i32_byval_i32(i32 %a, ptr addrspace(5) byval(i32) inttoptr (i32 16 to ptr addrspace(5)))
266 define fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %arg0, i32 %arg1, [32 x i32] %large) #1 {
267 ; GCN-LABEL: name: i32_fastcc_i32_i32_a32i32
268 ; GCN: bb.1 (%ir-block.0):
269 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
271 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
272 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
273 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
274 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
275 ; GCN-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
276 ; GCN-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
277 ; GCN-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
278 ; GCN-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
279 ; GCN-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
280 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
281 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
282 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
283 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
284 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
285 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
286 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
287 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
288 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
289 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
290 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
291 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
292 ; GCN-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
293 ; GCN-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
294 ; GCN-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
295 ; GCN-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
296 ; GCN-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
297 ; GCN-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
298 ; GCN-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
299 ; GCN-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
300 ; GCN-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
301 ; GCN-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
302 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
303 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.2, align 16, addrspace 5)
304 ; GCN-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
305 ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.1, addrspace 5)
306 ; GCN-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
307 ; GCN-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.0, align 8, addrspace 5)
308 ; GCN-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
309 ; GCN-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[LOAD1]]
310 ; GCN-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[LOAD2]]
311 ; GCN-NEXT: $vgpr0 = COPY [[ADD2]](s32)
312 ; GCN-NEXT: SI_RETURN implicit $vgpr0
313 %val_firststack = extractvalue [32 x i32] %large, 30
314 %val_laststack = extractvalue [32 x i32] %large, 31
315 %add0 = add i32 %arg0, %arg1
316 %add1 = add i32 %add0, %val_firststack
317 %add2 = add i32 %add1, %val_laststack
321 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] %c) #1 {
322 ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_a32i32
324 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
326 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
327 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
328 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
329 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
330 ; GCN-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
331 ; GCN-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
332 ; GCN-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
333 ; GCN-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
334 ; GCN-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
335 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
336 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
337 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
338 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
339 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
340 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
341 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
342 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
343 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
344 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
345 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
346 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
347 ; GCN-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
348 ; GCN-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
349 ; GCN-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
350 ; GCN-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
351 ; GCN-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
352 ; GCN-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
353 ; GCN-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
354 ; GCN-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
355 ; GCN-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
356 ; GCN-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
357 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
358 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.5, align 16, addrspace 5)
359 ; GCN-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
360 ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.4, addrspace 5)
361 ; GCN-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
362 ; GCN-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.3, align 8, addrspace 5)
363 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
364 ; GCN-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
365 ; GCN-NEXT: G_STORE [[LOAD]](s32), [[FRAME_INDEX3]](p5) :: (store (s32) into %fixed-stack.2, align 16, addrspace 5)
366 ; GCN-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
367 ; GCN-NEXT: G_STORE [[LOAD1]](s32), [[FRAME_INDEX4]](p5) :: (store (s32) into %fixed-stack.1, addrspace 5)
368 ; GCN-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
369 ; GCN-NEXT: G_STORE [[LOAD2]](s32), [[FRAME_INDEX5]](p5) :: (store (s32) into %fixed-stack.0, align 8, addrspace 5)
370 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
371 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
372 ; GCN-NEXT: $vgpr2 = COPY [[COPY2]](s32)
373 ; GCN-NEXT: $vgpr3 = COPY [[COPY3]](s32)
374 ; GCN-NEXT: $vgpr4 = COPY [[COPY4]](s32)
375 ; GCN-NEXT: $vgpr5 = COPY [[COPY5]](s32)
376 ; GCN-NEXT: $vgpr6 = COPY [[COPY6]](s32)
377 ; GCN-NEXT: $vgpr7 = COPY [[COPY7]](s32)
378 ; GCN-NEXT: $vgpr8 = COPY [[COPY8]](s32)
379 ; GCN-NEXT: $vgpr9 = COPY [[COPY9]](s32)
380 ; GCN-NEXT: $vgpr10 = COPY [[COPY10]](s32)
381 ; GCN-NEXT: $vgpr11 = COPY [[COPY11]](s32)
382 ; GCN-NEXT: $vgpr12 = COPY [[COPY12]](s32)
383 ; GCN-NEXT: $vgpr13 = COPY [[COPY13]](s32)
384 ; GCN-NEXT: $vgpr14 = COPY [[COPY14]](s32)
385 ; GCN-NEXT: $vgpr15 = COPY [[COPY15]](s32)
386 ; GCN-NEXT: $vgpr16 = COPY [[COPY16]](s32)
387 ; GCN-NEXT: $vgpr17 = COPY [[COPY17]](s32)
388 ; GCN-NEXT: $vgpr18 = COPY [[COPY18]](s32)
389 ; GCN-NEXT: $vgpr19 = COPY [[COPY19]](s32)
390 ; GCN-NEXT: $vgpr20 = COPY [[COPY20]](s32)
391 ; GCN-NEXT: $vgpr21 = COPY [[COPY21]](s32)
392 ; GCN-NEXT: $vgpr22 = COPY [[COPY22]](s32)
393 ; GCN-NEXT: $vgpr23 = COPY [[COPY23]](s32)
394 ; GCN-NEXT: $vgpr24 = COPY [[COPY24]](s32)
395 ; GCN-NEXT: $vgpr25 = COPY [[COPY25]](s32)
396 ; GCN-NEXT: $vgpr26 = COPY [[COPY26]](s32)
397 ; GCN-NEXT: $vgpr27 = COPY [[COPY27]](s32)
398 ; GCN-NEXT: $vgpr28 = COPY [[COPY28]](s32)
399 ; GCN-NEXT: $vgpr29 = COPY [[COPY29]](s32)
400 ; GCN-NEXT: $vgpr30 = COPY [[COPY30]](s32)
401 ; GCN-NEXT: [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
402 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
403 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_a32i32, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3
405 %ret = tail call fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] %c)
409 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_a32i32_stack_object(i32 %a, i32 %b, [32 x i32] %c) #1 {
410 ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_a32i32_stack_object
412 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
414 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
415 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
416 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
417 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
418 ; GCN-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
419 ; GCN-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
420 ; GCN-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
421 ; GCN-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
422 ; GCN-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
423 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
424 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
425 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
426 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
427 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
428 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
429 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
430 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
431 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
432 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
433 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
434 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
435 ; GCN-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
436 ; GCN-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
437 ; GCN-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
438 ; GCN-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
439 ; GCN-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
440 ; GCN-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
441 ; GCN-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
442 ; GCN-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
443 ; GCN-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
444 ; GCN-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
445 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
446 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.5, align 16, addrspace 5)
447 ; GCN-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
448 ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.4, addrspace 5)
449 ; GCN-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
450 ; GCN-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.3, align 8, addrspace 5)
451 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
452 ; GCN-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
453 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
454 ; GCN-NEXT: %39:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s32)
455 ; GCN-NEXT: G_STORE [[C]](s32), %39(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
456 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
457 ; GCN-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
458 ; GCN-NEXT: G_STORE [[LOAD]](s32), [[FRAME_INDEX4]](p5) :: (store (s32) into %fixed-stack.2, align 16, addrspace 5)
459 ; GCN-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
460 ; GCN-NEXT: G_STORE [[LOAD1]](s32), [[FRAME_INDEX5]](p5) :: (store (s32) into %fixed-stack.1, addrspace 5)
461 ; GCN-NEXT: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
462 ; GCN-NEXT: G_STORE [[LOAD2]](s32), [[FRAME_INDEX6]](p5) :: (store (s32) into %fixed-stack.0, align 8, addrspace 5)
463 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
464 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
465 ; GCN-NEXT: $vgpr2 = COPY [[COPY2]](s32)
466 ; GCN-NEXT: $vgpr3 = COPY [[COPY3]](s32)
467 ; GCN-NEXT: $vgpr4 = COPY [[COPY4]](s32)
468 ; GCN-NEXT: $vgpr5 = COPY [[COPY5]](s32)
469 ; GCN-NEXT: $vgpr6 = COPY [[COPY6]](s32)
470 ; GCN-NEXT: $vgpr7 = COPY [[COPY7]](s32)
471 ; GCN-NEXT: $vgpr8 = COPY [[COPY8]](s32)
472 ; GCN-NEXT: $vgpr9 = COPY [[COPY9]](s32)
473 ; GCN-NEXT: $vgpr10 = COPY [[COPY10]](s32)
474 ; GCN-NEXT: $vgpr11 = COPY [[COPY11]](s32)
475 ; GCN-NEXT: $vgpr12 = COPY [[COPY12]](s32)
476 ; GCN-NEXT: $vgpr13 = COPY [[COPY13]](s32)
477 ; GCN-NEXT: $vgpr14 = COPY [[COPY14]](s32)
478 ; GCN-NEXT: $vgpr15 = COPY [[COPY15]](s32)
479 ; GCN-NEXT: $vgpr16 = COPY [[COPY16]](s32)
480 ; GCN-NEXT: $vgpr17 = COPY [[COPY17]](s32)
481 ; GCN-NEXT: $vgpr18 = COPY [[COPY18]](s32)
482 ; GCN-NEXT: $vgpr19 = COPY [[COPY19]](s32)
483 ; GCN-NEXT: $vgpr20 = COPY [[COPY20]](s32)
484 ; GCN-NEXT: $vgpr21 = COPY [[COPY21]](s32)
485 ; GCN-NEXT: $vgpr22 = COPY [[COPY22]](s32)
486 ; GCN-NEXT: $vgpr23 = COPY [[COPY23]](s32)
487 ; GCN-NEXT: $vgpr24 = COPY [[COPY24]](s32)
488 ; GCN-NEXT: $vgpr25 = COPY [[COPY25]](s32)
489 ; GCN-NEXT: $vgpr26 = COPY [[COPY26]](s32)
490 ; GCN-NEXT: $vgpr27 = COPY [[COPY27]](s32)
491 ; GCN-NEXT: $vgpr28 = COPY [[COPY28]](s32)
492 ; GCN-NEXT: $vgpr29 = COPY [[COPY29]](s32)
493 ; GCN-NEXT: $vgpr30 = COPY [[COPY30]](s32)
494 ; GCN-NEXT: [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
495 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
496 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_a32i32, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3
498 %alloca = alloca [16 x i32], align 4, addrspace(5)
499 %gep = getelementptr inbounds [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 5
500 store volatile i32 9, ptr addrspace(5) %gep
501 %ret = tail call fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] %c)
505 ; If the callee requires more stack argument space than the caller,
506 ; don't do a tail call.
507 ; TODO: Do we really need this restriction?
508 define fastcc i32 @no_sibling_call_callee_more_stack_space(i32 %a, i32 %b) #1 {
509 ; GCN-LABEL: name: no_sibling_call_callee_more_stack_space
511 ; GCN-NEXT: liveins: $vgpr0, $vgpr1
513 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
514 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
515 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
516 ; GCN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
517 ; GCN-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
518 ; GCN-NEXT: [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sgpr32
519 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
520 ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C1]](s32)
521 ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (store (s32) into stack, align 16, addrspace 5)
522 ; GCN-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
523 ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C2]](s32)
524 ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD1]](p5) :: (store (s32) into stack + 4, addrspace 5)
525 ; GCN-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
526 ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C3]](s32)
527 ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD2]](p5) :: (store (s32) into stack + 8, align 8, addrspace 5)
528 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
529 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
530 ; GCN-NEXT: $vgpr2 = COPY [[C]](s32)
531 ; GCN-NEXT: $vgpr3 = COPY [[C]](s32)
532 ; GCN-NEXT: $vgpr4 = COPY [[C]](s32)
533 ; GCN-NEXT: $vgpr5 = COPY [[C]](s32)
534 ; GCN-NEXT: $vgpr6 = COPY [[C]](s32)
535 ; GCN-NEXT: $vgpr7 = COPY [[C]](s32)
536 ; GCN-NEXT: $vgpr8 = COPY [[C]](s32)
537 ; GCN-NEXT: $vgpr9 = COPY [[C]](s32)
538 ; GCN-NEXT: $vgpr10 = COPY [[C]](s32)
539 ; GCN-NEXT: $vgpr11 = COPY [[C]](s32)
540 ; GCN-NEXT: $vgpr12 = COPY [[C]](s32)
541 ; GCN-NEXT: $vgpr13 = COPY [[C]](s32)
542 ; GCN-NEXT: $vgpr14 = COPY [[C]](s32)
543 ; GCN-NEXT: $vgpr15 = COPY [[C]](s32)
544 ; GCN-NEXT: $vgpr16 = COPY [[C]](s32)
545 ; GCN-NEXT: $vgpr17 = COPY [[C]](s32)
546 ; GCN-NEXT: $vgpr18 = COPY [[C]](s32)
547 ; GCN-NEXT: $vgpr19 = COPY [[C]](s32)
548 ; GCN-NEXT: $vgpr20 = COPY [[C]](s32)
549 ; GCN-NEXT: $vgpr21 = COPY [[C]](s32)
550 ; GCN-NEXT: $vgpr22 = COPY [[C]](s32)
551 ; GCN-NEXT: $vgpr23 = COPY [[C]](s32)
552 ; GCN-NEXT: $vgpr24 = COPY [[C]](s32)
553 ; GCN-NEXT: $vgpr25 = COPY [[C]](s32)
554 ; GCN-NEXT: $vgpr26 = COPY [[C]](s32)
555 ; GCN-NEXT: $vgpr27 = COPY [[C]](s32)
556 ; GCN-NEXT: $vgpr28 = COPY [[C]](s32)
557 ; GCN-NEXT: $vgpr29 = COPY [[C]](s32)
558 ; GCN-NEXT: $vgpr30 = COPY [[C]](s32)
559 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
560 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
561 ; GCN-NEXT: $sgpr30_sgpr31 = noconvergent G_SI_CALL [[GV]](p0), @i32_fastcc_i32_i32_a32i32, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
562 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr0
563 ; GCN-NEXT: ADJCALLSTACKDOWN 0, 12, implicit-def $scc
564 ; GCN-NEXT: $vgpr0 = COPY [[COPY3]](s32)
565 ; GCN-NEXT: SI_RETURN implicit $vgpr0
567 %ret = tail call fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] zeroinitializer)
571 ; Have another non-tail in the function
572 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_other_call(i32 %a, i32 %b, i32 %c) #1 {
573 ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_other_call
575 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
577 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
578 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
579 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
580 ; GCN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
581 ; GCN-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
582 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
583 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
584 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
585 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
586 ; GCN-NEXT: $sgpr30_sgpr31 = noconvergent G_SI_CALL [[GV]](p0), @i32_fastcc_i32_i32, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
587 ; GCN-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr0
588 ; GCN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
589 ; GCN-NEXT: [[GV1:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @sibling_call_i32_fastcc_i32_i32
590 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
591 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
592 ; GCN-NEXT: $vgpr2 = COPY [[COPY4]](s32)
593 ; GCN-NEXT: [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
594 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY5]](<4 x s32>)
595 ; GCN-NEXT: SI_TCRETURN [[GV1]](p0), @sibling_call_i32_fastcc_i32_i32, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $sgpr0_sgpr1_sgpr2_sgpr3
597 %other.call = tail call fastcc i32 @i32_fastcc_i32_i32(i32 %a, i32 %b)
598 %ret = tail call fastcc i32 @sibling_call_i32_fastcc_i32_i32(i32 %a, i32 %b, i32 %other.call)
602 ; Have stack object in caller and stack passed arguments. SP should be
603 ; in same place at function exit.
604 define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] %c) #1 {
605 ; GCN-LABEL: name: sibling_call_stack_objecti32_fastcc_i32_i32_a32i32
607 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
609 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
610 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
611 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
612 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
613 ; GCN-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
614 ; GCN-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
615 ; GCN-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
616 ; GCN-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
617 ; GCN-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
618 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
619 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
620 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
621 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
622 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
623 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
624 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
625 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
626 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
627 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
628 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
629 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
630 ; GCN-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
631 ; GCN-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
632 ; GCN-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
633 ; GCN-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
634 ; GCN-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
635 ; GCN-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
636 ; GCN-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
637 ; GCN-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
638 ; GCN-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
639 ; GCN-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
640 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
641 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.5, align 16, addrspace 5)
642 ; GCN-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
643 ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.4, addrspace 5)
644 ; GCN-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
645 ; GCN-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.3, align 8, addrspace 5)
646 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
647 ; GCN-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
648 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
649 ; GCN-NEXT: %39:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s32)
650 ; GCN-NEXT: G_STORE [[C]](s32), %39(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
651 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
652 ; GCN-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
653 ; GCN-NEXT: G_STORE [[LOAD]](s32), [[FRAME_INDEX4]](p5) :: (store (s32) into %fixed-stack.2, align 16, addrspace 5)
654 ; GCN-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
655 ; GCN-NEXT: G_STORE [[LOAD1]](s32), [[FRAME_INDEX5]](p5) :: (store (s32) into %fixed-stack.1, addrspace 5)
656 ; GCN-NEXT: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
657 ; GCN-NEXT: G_STORE [[LOAD2]](s32), [[FRAME_INDEX6]](p5) :: (store (s32) into %fixed-stack.0, align 8, addrspace 5)
658 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
659 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
660 ; GCN-NEXT: $vgpr2 = COPY [[COPY2]](s32)
661 ; GCN-NEXT: $vgpr3 = COPY [[COPY3]](s32)
662 ; GCN-NEXT: $vgpr4 = COPY [[COPY4]](s32)
663 ; GCN-NEXT: $vgpr5 = COPY [[COPY5]](s32)
664 ; GCN-NEXT: $vgpr6 = COPY [[COPY6]](s32)
665 ; GCN-NEXT: $vgpr7 = COPY [[COPY7]](s32)
666 ; GCN-NEXT: $vgpr8 = COPY [[COPY8]](s32)
667 ; GCN-NEXT: $vgpr9 = COPY [[COPY9]](s32)
668 ; GCN-NEXT: $vgpr10 = COPY [[COPY10]](s32)
669 ; GCN-NEXT: $vgpr11 = COPY [[COPY11]](s32)
670 ; GCN-NEXT: $vgpr12 = COPY [[COPY12]](s32)
671 ; GCN-NEXT: $vgpr13 = COPY [[COPY13]](s32)
672 ; GCN-NEXT: $vgpr14 = COPY [[COPY14]](s32)
673 ; GCN-NEXT: $vgpr15 = COPY [[COPY15]](s32)
674 ; GCN-NEXT: $vgpr16 = COPY [[COPY16]](s32)
675 ; GCN-NEXT: $vgpr17 = COPY [[COPY17]](s32)
676 ; GCN-NEXT: $vgpr18 = COPY [[COPY18]](s32)
677 ; GCN-NEXT: $vgpr19 = COPY [[COPY19]](s32)
678 ; GCN-NEXT: $vgpr20 = COPY [[COPY20]](s32)
679 ; GCN-NEXT: $vgpr21 = COPY [[COPY21]](s32)
680 ; GCN-NEXT: $vgpr22 = COPY [[COPY22]](s32)
681 ; GCN-NEXT: $vgpr23 = COPY [[COPY23]](s32)
682 ; GCN-NEXT: $vgpr24 = COPY [[COPY24]](s32)
683 ; GCN-NEXT: $vgpr25 = COPY [[COPY25]](s32)
684 ; GCN-NEXT: $vgpr26 = COPY [[COPY26]](s32)
685 ; GCN-NEXT: $vgpr27 = COPY [[COPY27]](s32)
686 ; GCN-NEXT: $vgpr28 = COPY [[COPY28]](s32)
687 ; GCN-NEXT: $vgpr29 = COPY [[COPY29]](s32)
688 ; GCN-NEXT: $vgpr30 = COPY [[COPY30]](s32)
689 ; GCN-NEXT: [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
690 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
691 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_a32i32, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3
693 %alloca = alloca [16 x i32], align 4, addrspace(5)
694 %gep = getelementptr inbounds [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 5
695 store volatile i32 9, ptr addrspace(5) %gep
696 %ret = tail call fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] %c)
700 define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32_larger_arg_area(i32 %a, i32 %b, [36 x i32] %c) #1 {
701 ; GCN-LABEL: name: sibling_call_stack_objecti32_fastcc_i32_i32_a32i32_larger_arg_area
703 ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
705 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
706 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
707 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
708 ; GCN-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
709 ; GCN-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
710 ; GCN-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
711 ; GCN-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
712 ; GCN-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
713 ; GCN-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
714 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
715 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
716 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
717 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr12
718 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr13
719 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
720 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
721 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr16
722 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr17
723 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr18
724 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr19
725 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr20
726 ; GCN-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr21
727 ; GCN-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr22
728 ; GCN-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr23
729 ; GCN-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr24
730 ; GCN-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr25
731 ; GCN-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr26
732 ; GCN-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr27
733 ; GCN-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr28
734 ; GCN-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr29
735 ; GCN-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
736 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.9
737 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.9, align 16, addrspace 5)
738 ; GCN-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.8
739 ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.8, addrspace 5)
740 ; GCN-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.7
741 ; GCN-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.7, align 8, addrspace 5)
742 ; GCN-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.6
743 ; GCN-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load (s32) from %fixed-stack.6, addrspace 5)
744 ; GCN-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
745 ; GCN-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load (s32) from %fixed-stack.5, align 16, addrspace 5)
746 ; GCN-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
747 ; GCN-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p5) :: (invariant load (s32) from %fixed-stack.4, addrspace 5)
748 ; GCN-NEXT: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
749 ; GCN-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p5) :: (invariant load (s32) from %fixed-stack.3, align 8, addrspace 5)
750 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
751 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
752 ; GCN-NEXT: [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
753 ; GCN-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
754 ; GCN-NEXT: %47:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX7]], [[C2]](s32)
755 ; GCN-NEXT: G_STORE [[C]](s32), %47(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
756 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
757 ; GCN-NEXT: [[FRAME_INDEX8:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
758 ; GCN-NEXT: G_STORE [[C1]](s32), [[FRAME_INDEX8]](p5) :: (store (s32) into %fixed-stack.2, align 16, addrspace 5)
759 ; GCN-NEXT: [[FRAME_INDEX9:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
760 ; GCN-NEXT: G_STORE [[C1]](s32), [[FRAME_INDEX9]](p5) :: (store (s32) into %fixed-stack.1, addrspace 5)
761 ; GCN-NEXT: [[FRAME_INDEX10:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
762 ; GCN-NEXT: G_STORE [[C1]](s32), [[FRAME_INDEX10]](p5) :: (store (s32) into %fixed-stack.0, align 8, addrspace 5)
763 ; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
764 ; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
765 ; GCN-NEXT: $vgpr2 = COPY [[C1]](s32)
766 ; GCN-NEXT: $vgpr3 = COPY [[C1]](s32)
767 ; GCN-NEXT: $vgpr4 = COPY [[C1]](s32)
768 ; GCN-NEXT: $vgpr5 = COPY [[C1]](s32)
769 ; GCN-NEXT: $vgpr6 = COPY [[C1]](s32)
770 ; GCN-NEXT: $vgpr7 = COPY [[C1]](s32)
771 ; GCN-NEXT: $vgpr8 = COPY [[C1]](s32)
772 ; GCN-NEXT: $vgpr9 = COPY [[C1]](s32)
773 ; GCN-NEXT: $vgpr10 = COPY [[C1]](s32)
774 ; GCN-NEXT: $vgpr11 = COPY [[C1]](s32)
775 ; GCN-NEXT: $vgpr12 = COPY [[C1]](s32)
776 ; GCN-NEXT: $vgpr13 = COPY [[C1]](s32)
777 ; GCN-NEXT: $vgpr14 = COPY [[C1]](s32)
778 ; GCN-NEXT: $vgpr15 = COPY [[C1]](s32)
779 ; GCN-NEXT: $vgpr16 = COPY [[C1]](s32)
780 ; GCN-NEXT: $vgpr17 = COPY [[C1]](s32)
781 ; GCN-NEXT: $vgpr18 = COPY [[C1]](s32)
782 ; GCN-NEXT: $vgpr19 = COPY [[C1]](s32)
783 ; GCN-NEXT: $vgpr20 = COPY [[C1]](s32)
784 ; GCN-NEXT: $vgpr21 = COPY [[C1]](s32)
785 ; GCN-NEXT: $vgpr22 = COPY [[C1]](s32)
786 ; GCN-NEXT: $vgpr23 = COPY [[C1]](s32)
787 ; GCN-NEXT: $vgpr24 = COPY [[C1]](s32)
788 ; GCN-NEXT: $vgpr25 = COPY [[C1]](s32)
789 ; GCN-NEXT: $vgpr26 = COPY [[C1]](s32)
790 ; GCN-NEXT: $vgpr27 = COPY [[C1]](s32)
791 ; GCN-NEXT: $vgpr28 = COPY [[C1]](s32)
792 ; GCN-NEXT: $vgpr29 = COPY [[C1]](s32)
793 ; GCN-NEXT: $vgpr30 = COPY [[C1]](s32)
794 ; GCN-NEXT: [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
795 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
796 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_a32i32, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3
798 %alloca = alloca [16 x i32], align 4, addrspace(5)
799 %gep = getelementptr inbounds [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 5
800 store volatile i32 9, ptr addrspace(5) %gep
801 %ret = tail call fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] zeroinitializer)
805 declare hidden void @void_fastcc_multi_byval(i32 %a, ptr addrspace(5) byval([3 x i32]) align 16, ptr addrspace(5) byval([2 x i64]))
807 define fastcc void @sibling_call_fastcc_multi_byval(i32 %a, [64 x i32]) #1 {
808 ; GCN-LABEL: name: sibling_call_fastcc_multi_byval
810 ; GCN-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
812 ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
813 ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr15
814 ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr14
815 ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr13
816 ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr12
817 ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr10_sgpr11
818 ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr8_sgpr9
819 ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr6_sgpr7
820 ; GCN-NEXT: [[COPY8:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
821 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr0
822 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr1
823 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr2
824 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr3
825 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr4
826 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr5
827 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr6
828 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr7
829 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr8
830 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr9
831 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr10
832 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr11
833 ; GCN-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr12
834 ; GCN-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr13
835 ; GCN-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr14
836 ; GCN-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr15
837 ; GCN-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr16
838 ; GCN-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr17
839 ; GCN-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr18
840 ; GCN-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr19
841 ; GCN-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr20
842 ; GCN-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr21
843 ; GCN-NEXT: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr22
844 ; GCN-NEXT: [[COPY32:%[0-9]+]]:_(s32) = COPY $vgpr23
845 ; GCN-NEXT: [[COPY33:%[0-9]+]]:_(s32) = COPY $vgpr24
846 ; GCN-NEXT: [[COPY34:%[0-9]+]]:_(s32) = COPY $vgpr25
847 ; GCN-NEXT: [[COPY35:%[0-9]+]]:_(s32) = COPY $vgpr26
848 ; GCN-NEXT: [[COPY36:%[0-9]+]]:_(s32) = COPY $vgpr27
849 ; GCN-NEXT: [[COPY37:%[0-9]+]]:_(s32) = COPY $vgpr28
850 ; GCN-NEXT: [[COPY38:%[0-9]+]]:_(s32) = COPY $vgpr29
851 ; GCN-NEXT: [[COPY39:%[0-9]+]]:_(s32) = COPY $vgpr30
852 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.35
853 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.35, align 16, addrspace 5)
854 ; GCN-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.34
855 ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.34, addrspace 5)
856 ; GCN-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.33
857 ; GCN-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.33, align 8, addrspace 5)
858 ; GCN-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.32
859 ; GCN-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load (s32) from %fixed-stack.32, addrspace 5)
860 ; GCN-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.31
861 ; GCN-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load (s32) from %fixed-stack.31, align 16, addrspace 5)
862 ; GCN-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.30
863 ; GCN-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p5) :: (invariant load (s32) from %fixed-stack.30, addrspace 5)
864 ; GCN-NEXT: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.29
865 ; GCN-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p5) :: (invariant load (s32) from %fixed-stack.29, align 8, addrspace 5)
866 ; GCN-NEXT: [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.28
867 ; GCN-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p5) :: (invariant load (s32) from %fixed-stack.28, addrspace 5)
868 ; GCN-NEXT: [[FRAME_INDEX8:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.27
869 ; GCN-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX8]](p5) :: (invariant load (s32) from %fixed-stack.27, align 16, addrspace 5)
870 ; GCN-NEXT: [[FRAME_INDEX9:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.26
871 ; GCN-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX9]](p5) :: (invariant load (s32) from %fixed-stack.26, addrspace 5)
872 ; GCN-NEXT: [[FRAME_INDEX10:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.25
873 ; GCN-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX10]](p5) :: (invariant load (s32) from %fixed-stack.25, align 8, addrspace 5)
874 ; GCN-NEXT: [[FRAME_INDEX11:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.24
875 ; GCN-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX11]](p5) :: (invariant load (s32) from %fixed-stack.24, addrspace 5)
876 ; GCN-NEXT: [[FRAME_INDEX12:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.23
877 ; GCN-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX12]](p5) :: (invariant load (s32) from %fixed-stack.23, align 16, addrspace 5)
878 ; GCN-NEXT: [[FRAME_INDEX13:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.22
879 ; GCN-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX13]](p5) :: (invariant load (s32) from %fixed-stack.22, addrspace 5)
880 ; GCN-NEXT: [[FRAME_INDEX14:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.21
881 ; GCN-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX14]](p5) :: (invariant load (s32) from %fixed-stack.21, align 8, addrspace 5)
882 ; GCN-NEXT: [[FRAME_INDEX15:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.20
883 ; GCN-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX15]](p5) :: (invariant load (s32) from %fixed-stack.20, addrspace 5)
884 ; GCN-NEXT: [[FRAME_INDEX16:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.19
885 ; GCN-NEXT: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX16]](p5) :: (invariant load (s32) from %fixed-stack.19, align 16, addrspace 5)
886 ; GCN-NEXT: [[FRAME_INDEX17:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.18
887 ; GCN-NEXT: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX17]](p5) :: (invariant load (s32) from %fixed-stack.18, addrspace 5)
888 ; GCN-NEXT: [[FRAME_INDEX18:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.17
889 ; GCN-NEXT: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX18]](p5) :: (invariant load (s32) from %fixed-stack.17, align 8, addrspace 5)
890 ; GCN-NEXT: [[FRAME_INDEX19:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.16
891 ; GCN-NEXT: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX19]](p5) :: (invariant load (s32) from %fixed-stack.16, addrspace 5)
892 ; GCN-NEXT: [[FRAME_INDEX20:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.15
893 ; GCN-NEXT: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX20]](p5) :: (invariant load (s32) from %fixed-stack.15, align 16, addrspace 5)
894 ; GCN-NEXT: [[FRAME_INDEX21:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.14
895 ; GCN-NEXT: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX21]](p5) :: (invariant load (s32) from %fixed-stack.14, addrspace 5)
896 ; GCN-NEXT: [[FRAME_INDEX22:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.13
897 ; GCN-NEXT: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX22]](p5) :: (invariant load (s32) from %fixed-stack.13, align 8, addrspace 5)
898 ; GCN-NEXT: [[FRAME_INDEX23:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.12
899 ; GCN-NEXT: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX23]](p5) :: (invariant load (s32) from %fixed-stack.12, addrspace 5)
900 ; GCN-NEXT: [[FRAME_INDEX24:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.11
901 ; GCN-NEXT: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX24]](p5) :: (invariant load (s32) from %fixed-stack.11, align 16, addrspace 5)
902 ; GCN-NEXT: [[FRAME_INDEX25:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.10
903 ; GCN-NEXT: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX25]](p5) :: (invariant load (s32) from %fixed-stack.10, addrspace 5)
904 ; GCN-NEXT: [[FRAME_INDEX26:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.9
905 ; GCN-NEXT: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX26]](p5) :: (invariant load (s32) from %fixed-stack.9, align 8, addrspace 5)
906 ; GCN-NEXT: [[FRAME_INDEX27:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.8
907 ; GCN-NEXT: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX27]](p5) :: (invariant load (s32) from %fixed-stack.8, addrspace 5)
908 ; GCN-NEXT: [[FRAME_INDEX28:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.7
909 ; GCN-NEXT: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX28]](p5) :: (invariant load (s32) from %fixed-stack.7, align 16, addrspace 5)
910 ; GCN-NEXT: [[FRAME_INDEX29:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.6
911 ; GCN-NEXT: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX29]](p5) :: (invariant load (s32) from %fixed-stack.6, addrspace 5)
912 ; GCN-NEXT: [[FRAME_INDEX30:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
913 ; GCN-NEXT: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX30]](p5) :: (invariant load (s32) from %fixed-stack.5, align 8, addrspace 5)
914 ; GCN-NEXT: [[FRAME_INDEX31:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
915 ; GCN-NEXT: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX31]](p5) :: (invariant load (s32) from %fixed-stack.4, addrspace 5)
916 ; GCN-NEXT: [[FRAME_INDEX32:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
917 ; GCN-NEXT: [[LOAD32:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX32]](p5) :: (invariant load (s32) from %fixed-stack.3, align 16, addrspace 5)
918 ; GCN-NEXT: [[FRAME_INDEX33:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
919 ; GCN-NEXT: [[LOAD33:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX33]](p5) :: (invariant load (s32) from %fixed-stack.2, addrspace 5)
920 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
921 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
922 ; GCN-NEXT: [[FRAME_INDEX34:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca0
923 ; GCN-NEXT: [[FRAME_INDEX35:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.1.alloca1
924 ; GCN-NEXT: G_STORE [[C]](s32), [[FRAME_INDEX34]](p5) :: (store (s32) into %ir.alloca0, addrspace 5)
925 ; GCN-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
926 ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX34]], [[C2]](s32)
927 ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.alloca0 + 4, addrspace 5)
928 ; GCN-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
929 ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX34]], [[C3]](s32)
930 ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD1]](p5) :: (store (s32) into %ir.alloca0 + 8, addrspace 5)
931 ; GCN-NEXT: G_STORE [[C1]](s64), [[FRAME_INDEX35]](p5) :: (store (s64) into %ir.alloca1, addrspace 5)
932 ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX35]], [[C3]](s32)
933 ; GCN-NEXT: G_STORE [[C1]](s64), [[PTR_ADD2]](p5) :: (store (s64) into %ir.alloca1 + 8, addrspace 5)
934 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @void_fastcc_multi_byval
935 ; GCN-NEXT: [[COPY40:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
936 ; GCN-NEXT: [[COPY41:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
937 ; GCN-NEXT: [[COPY42:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
938 ; GCN-NEXT: [[COPY43:%[0-9]+]]:_(s64) = COPY [[COPY5]](s64)
939 ; GCN-NEXT: [[COPY44:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
940 ; GCN-NEXT: [[COPY45:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
941 ; GCN-NEXT: [[COPY46:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
942 ; GCN-NEXT: [[COPY47:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
943 ; GCN-NEXT: [[COPY48:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
944 ; GCN-NEXT: [[FRAME_INDEX36:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
945 ; GCN-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
946 ; GCN-NEXT: G_MEMCPY [[FRAME_INDEX36]](p5), [[FRAME_INDEX34]](p5), [[C4]](s32), 0 :: (dereferenceable store (s96) into %fixed-stack.1, align 16, addrspace 5), (dereferenceable load (s96) from %ir.alloca0, align 16, addrspace 5)
947 ; GCN-NEXT: [[FRAME_INDEX37:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
948 ; GCN-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
949 ; GCN-NEXT: G_MEMCPY [[FRAME_INDEX37]](p5), [[FRAME_INDEX35]](p5), [[C5]](s32), 0 :: (dereferenceable store (s128) into %fixed-stack.0, addrspace 5), (dereferenceable load (s128) from %ir.alloca1, align 8, addrspace 5)
950 ; GCN-NEXT: $vgpr0 = COPY [[COPY9]](s32)
951 ; GCN-NEXT: [[COPY49:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
952 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY49]](<4 x s32>)
953 ; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY40]](p4)
954 ; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY41]](p4)
955 ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[COPY42]](p4)
956 ; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY43]](s64)
957 ; GCN-NEXT: $sgpr12 = COPY [[COPY44]](s32)
958 ; GCN-NEXT: $sgpr13 = COPY [[COPY45]](s32)
959 ; GCN-NEXT: $sgpr14 = COPY [[COPY46]](s32)
960 ; GCN-NEXT: $sgpr15 = COPY [[COPY47]](s32)
961 ; GCN-NEXT: $vgpr31 = COPY [[COPY48]](s32)
962 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @void_fastcc_multi_byval, 0, csr_amdgpu, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
964 %alloca0 = alloca [3 x i32], align 16, addrspace(5)
965 %alloca1 = alloca [2 x i64], align 8, addrspace(5)
966 store [3 x i32] [i32 9, i32 9, i32 9], ptr addrspace(5) %alloca0
967 store [2 x i64] zeroinitializer, ptr addrspace(5) %alloca1
968 tail call fastcc void @void_fastcc_multi_byval(i32 %a, ptr addrspace(5) byval([3 x i32]) %alloca0, ptr addrspace(5) byval([2 x i64]) %alloca1)
972 declare hidden void @void_fastcc_byval_and_stack_passed(ptr addrspace(5) byval([3 x i32]) align 16, [32 x i32], i32)
974 ; Callee has both a byval argument and a non-byval stack-passed argument
975 define fastcc void @sibling_call_byval_and_stack_passed(i32 %stack.out.arg, [64 x i32]) #1 {
976 ; GCN-LABEL: name: sibling_call_byval_and_stack_passed
978 ; GCN-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
980 ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
981 ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr15
982 ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr14
983 ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr13
984 ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr12
985 ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr10_sgpr11
986 ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr8_sgpr9
987 ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr6_sgpr7
988 ; GCN-NEXT: [[COPY8:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
989 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr0
990 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr1
991 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr2
992 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr3
993 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr4
994 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr5
995 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr6
996 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr7
997 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr8
998 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr9
999 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr10
1000 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr11
1001 ; GCN-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr12
1002 ; GCN-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr13
1003 ; GCN-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr14
1004 ; GCN-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr15
1005 ; GCN-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY $vgpr16
1006 ; GCN-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY $vgpr17
1007 ; GCN-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY $vgpr18
1008 ; GCN-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY $vgpr19
1009 ; GCN-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY $vgpr20
1010 ; GCN-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr21
1011 ; GCN-NEXT: [[COPY31:%[0-9]+]]:_(s32) = COPY $vgpr22
1012 ; GCN-NEXT: [[COPY32:%[0-9]+]]:_(s32) = COPY $vgpr23
1013 ; GCN-NEXT: [[COPY33:%[0-9]+]]:_(s32) = COPY $vgpr24
1014 ; GCN-NEXT: [[COPY34:%[0-9]+]]:_(s32) = COPY $vgpr25
1015 ; GCN-NEXT: [[COPY35:%[0-9]+]]:_(s32) = COPY $vgpr26
1016 ; GCN-NEXT: [[COPY36:%[0-9]+]]:_(s32) = COPY $vgpr27
1017 ; GCN-NEXT: [[COPY37:%[0-9]+]]:_(s32) = COPY $vgpr28
1018 ; GCN-NEXT: [[COPY38:%[0-9]+]]:_(s32) = COPY $vgpr29
1019 ; GCN-NEXT: [[COPY39:%[0-9]+]]:_(s32) = COPY $vgpr30
1020 ; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.36
1021 ; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.36, align 16, addrspace 5)
1022 ; GCN-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.35
1023 ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.35, addrspace 5)
1024 ; GCN-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.34
1025 ; GCN-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.34, align 8, addrspace 5)
1026 ; GCN-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.33
1027 ; GCN-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load (s32) from %fixed-stack.33, addrspace 5)
1028 ; GCN-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.32
1029 ; GCN-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load (s32) from %fixed-stack.32, align 16, addrspace 5)
1030 ; GCN-NEXT: [[FRAME_INDEX5:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.31
1031 ; GCN-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p5) :: (invariant load (s32) from %fixed-stack.31, addrspace 5)
1032 ; GCN-NEXT: [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.30
1033 ; GCN-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p5) :: (invariant load (s32) from %fixed-stack.30, align 8, addrspace 5)
1034 ; GCN-NEXT: [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.29
1035 ; GCN-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p5) :: (invariant load (s32) from %fixed-stack.29, addrspace 5)
1036 ; GCN-NEXT: [[FRAME_INDEX8:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.28
1037 ; GCN-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX8]](p5) :: (invariant load (s32) from %fixed-stack.28, align 16, addrspace 5)
1038 ; GCN-NEXT: [[FRAME_INDEX9:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.27
1039 ; GCN-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX9]](p5) :: (invariant load (s32) from %fixed-stack.27, addrspace 5)
1040 ; GCN-NEXT: [[FRAME_INDEX10:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.26
1041 ; GCN-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX10]](p5) :: (invariant load (s32) from %fixed-stack.26, align 8, addrspace 5)
1042 ; GCN-NEXT: [[FRAME_INDEX11:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.25
1043 ; GCN-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX11]](p5) :: (invariant load (s32) from %fixed-stack.25, addrspace 5)
1044 ; GCN-NEXT: [[FRAME_INDEX12:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.24
1045 ; GCN-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX12]](p5) :: (invariant load (s32) from %fixed-stack.24, align 16, addrspace 5)
1046 ; GCN-NEXT: [[FRAME_INDEX13:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.23
1047 ; GCN-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX13]](p5) :: (invariant load (s32) from %fixed-stack.23, addrspace 5)
1048 ; GCN-NEXT: [[FRAME_INDEX14:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.22
1049 ; GCN-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX14]](p5) :: (invariant load (s32) from %fixed-stack.22, align 8, addrspace 5)
1050 ; GCN-NEXT: [[FRAME_INDEX15:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.21
1051 ; GCN-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX15]](p5) :: (invariant load (s32) from %fixed-stack.21, addrspace 5)
1052 ; GCN-NEXT: [[FRAME_INDEX16:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.20
1053 ; GCN-NEXT: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX16]](p5) :: (invariant load (s32) from %fixed-stack.20, align 16, addrspace 5)
1054 ; GCN-NEXT: [[FRAME_INDEX17:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.19
1055 ; GCN-NEXT: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX17]](p5) :: (invariant load (s32) from %fixed-stack.19, addrspace 5)
1056 ; GCN-NEXT: [[FRAME_INDEX18:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.18
1057 ; GCN-NEXT: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX18]](p5) :: (invariant load (s32) from %fixed-stack.18, align 8, addrspace 5)
1058 ; GCN-NEXT: [[FRAME_INDEX19:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.17
1059 ; GCN-NEXT: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX19]](p5) :: (invariant load (s32) from %fixed-stack.17, addrspace 5)
1060 ; GCN-NEXT: [[FRAME_INDEX20:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.16
1061 ; GCN-NEXT: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX20]](p5) :: (invariant load (s32) from %fixed-stack.16, align 16, addrspace 5)
1062 ; GCN-NEXT: [[FRAME_INDEX21:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.15
1063 ; GCN-NEXT: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX21]](p5) :: (invariant load (s32) from %fixed-stack.15, addrspace 5)
1064 ; GCN-NEXT: [[FRAME_INDEX22:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.14
1065 ; GCN-NEXT: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX22]](p5) :: (invariant load (s32) from %fixed-stack.14, align 8, addrspace 5)
1066 ; GCN-NEXT: [[FRAME_INDEX23:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.13
1067 ; GCN-NEXT: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX23]](p5) :: (invariant load (s32) from %fixed-stack.13, addrspace 5)
1068 ; GCN-NEXT: [[FRAME_INDEX24:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.12
1069 ; GCN-NEXT: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX24]](p5) :: (invariant load (s32) from %fixed-stack.12, align 16, addrspace 5)
1070 ; GCN-NEXT: [[FRAME_INDEX25:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.11
1071 ; GCN-NEXT: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX25]](p5) :: (invariant load (s32) from %fixed-stack.11, addrspace 5)
1072 ; GCN-NEXT: [[FRAME_INDEX26:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.10
1073 ; GCN-NEXT: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX26]](p5) :: (invariant load (s32) from %fixed-stack.10, align 8, addrspace 5)
1074 ; GCN-NEXT: [[FRAME_INDEX27:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.9
1075 ; GCN-NEXT: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX27]](p5) :: (invariant load (s32) from %fixed-stack.9, addrspace 5)
1076 ; GCN-NEXT: [[FRAME_INDEX28:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.8
1077 ; GCN-NEXT: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX28]](p5) :: (invariant load (s32) from %fixed-stack.8, align 16, addrspace 5)
1078 ; GCN-NEXT: [[FRAME_INDEX29:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.7
1079 ; GCN-NEXT: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX29]](p5) :: (invariant load (s32) from %fixed-stack.7, addrspace 5)
1080 ; GCN-NEXT: [[FRAME_INDEX30:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.6
1081 ; GCN-NEXT: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX30]](p5) :: (invariant load (s32) from %fixed-stack.6, align 8, addrspace 5)
1082 ; GCN-NEXT: [[FRAME_INDEX31:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.5
1083 ; GCN-NEXT: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX31]](p5) :: (invariant load (s32) from %fixed-stack.5, addrspace 5)
1084 ; GCN-NEXT: [[FRAME_INDEX32:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.4
1085 ; GCN-NEXT: [[LOAD32:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX32]](p5) :: (invariant load (s32) from %fixed-stack.4, align 16, addrspace 5)
1086 ; GCN-NEXT: [[FRAME_INDEX33:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
1087 ; GCN-NEXT: [[LOAD33:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX33]](p5) :: (invariant load (s32) from %fixed-stack.3, addrspace 5)
1088 ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
1089 ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1090 ; GCN-NEXT: [[FRAME_INDEX34:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
1091 ; GCN-NEXT: G_STORE [[C]](s32), [[FRAME_INDEX34]](p5) :: (store (s32) into %ir.alloca, addrspace 5)
1092 ; GCN-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1093 ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX34]], [[C2]](s32)
1094 ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.alloca + 4, addrspace 5)
1095 ; GCN-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
1096 ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX34]], [[C3]](s32)
1097 ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD1]](p5) :: (store (s32) into %ir.alloca + 8, addrspace 5)
1098 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @void_fastcc_byval_and_stack_passed
1099 ; GCN-NEXT: [[COPY40:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
1100 ; GCN-NEXT: [[COPY41:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
1101 ; GCN-NEXT: [[COPY42:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
1102 ; GCN-NEXT: [[COPY43:%[0-9]+]]:_(s64) = COPY [[COPY5]](s64)
1103 ; GCN-NEXT: [[COPY44:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
1104 ; GCN-NEXT: [[COPY45:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
1105 ; GCN-NEXT: [[COPY46:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
1106 ; GCN-NEXT: [[COPY47:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
1107 ; GCN-NEXT: [[COPY48:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
1108 ; GCN-NEXT: [[FRAME_INDEX35:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
1109 ; GCN-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
1110 ; GCN-NEXT: G_MEMCPY [[FRAME_INDEX35]](p5), [[FRAME_INDEX34]](p5), [[C4]](s32), 0 :: (dereferenceable store (s96) into %fixed-stack.2, align 16, addrspace 5), (dereferenceable load (s96) from %ir.alloca, align 16, addrspace 5)
1111 ; GCN-NEXT: [[FRAME_INDEX36:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
1112 ; GCN-NEXT: G_STORE [[C1]](s32), [[FRAME_INDEX36]](p5) :: (store (s32) into %fixed-stack.1, addrspace 5)
1113 ; GCN-NEXT: [[FRAME_INDEX37:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
1114 ; GCN-NEXT: G_STORE [[COPY9]](s32), [[FRAME_INDEX37]](p5) :: (store (s32) into %fixed-stack.0, align 16, addrspace 5)
1115 ; GCN-NEXT: $vgpr0 = COPY [[C1]](s32)
1116 ; GCN-NEXT: $vgpr1 = COPY [[C1]](s32)
1117 ; GCN-NEXT: $vgpr2 = COPY [[C1]](s32)
1118 ; GCN-NEXT: $vgpr3 = COPY [[C1]](s32)
1119 ; GCN-NEXT: $vgpr4 = COPY [[C1]](s32)
1120 ; GCN-NEXT: $vgpr5 = COPY [[C1]](s32)
1121 ; GCN-NEXT: $vgpr6 = COPY [[C1]](s32)
1122 ; GCN-NEXT: $vgpr7 = COPY [[C1]](s32)
1123 ; GCN-NEXT: $vgpr8 = COPY [[C1]](s32)
1124 ; GCN-NEXT: $vgpr9 = COPY [[C1]](s32)
1125 ; GCN-NEXT: $vgpr10 = COPY [[C1]](s32)
1126 ; GCN-NEXT: $vgpr11 = COPY [[C1]](s32)
1127 ; GCN-NEXT: $vgpr12 = COPY [[C1]](s32)
1128 ; GCN-NEXT: $vgpr13 = COPY [[C1]](s32)
1129 ; GCN-NEXT: $vgpr14 = COPY [[C1]](s32)
1130 ; GCN-NEXT: $vgpr15 = COPY [[C1]](s32)
1131 ; GCN-NEXT: $vgpr16 = COPY [[C1]](s32)
1132 ; GCN-NEXT: $vgpr17 = COPY [[C1]](s32)
1133 ; GCN-NEXT: $vgpr18 = COPY [[C1]](s32)
1134 ; GCN-NEXT: $vgpr19 = COPY [[C1]](s32)
1135 ; GCN-NEXT: $vgpr20 = COPY [[C1]](s32)
1136 ; GCN-NEXT: $vgpr21 = COPY [[C1]](s32)
1137 ; GCN-NEXT: $vgpr22 = COPY [[C1]](s32)
1138 ; GCN-NEXT: $vgpr23 = COPY [[C1]](s32)
1139 ; GCN-NEXT: $vgpr24 = COPY [[C1]](s32)
1140 ; GCN-NEXT: $vgpr25 = COPY [[C1]](s32)
1141 ; GCN-NEXT: $vgpr26 = COPY [[C1]](s32)
1142 ; GCN-NEXT: $vgpr27 = COPY [[C1]](s32)
1143 ; GCN-NEXT: $vgpr28 = COPY [[C1]](s32)
1144 ; GCN-NEXT: $vgpr29 = COPY [[C1]](s32)
1145 ; GCN-NEXT: $vgpr30 = COPY [[C1]](s32)
1146 ; GCN-NEXT: [[COPY49:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
1147 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY49]](<4 x s32>)
1148 ; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY40]](p4)
1149 ; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY41]](p4)
1150 ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[COPY42]](p4)
1151 ; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY43]](s64)
1152 ; GCN-NEXT: $sgpr12 = COPY [[COPY44]](s32)
1153 ; GCN-NEXT: $sgpr13 = COPY [[COPY45]](s32)
1154 ; GCN-NEXT: $sgpr14 = COPY [[COPY46]](s32)
1155 ; GCN-NEXT: $sgpr15 = COPY [[COPY47]](s32)
1156 ; GCN-NEXT: $vgpr31 = COPY [[COPY48]](s32)
1157 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @void_fastcc_byval_and_stack_passed, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
1159 %alloca = alloca [3 x i32], align 16, addrspace(5)
1160 store [3 x i32] [i32 9, i32 9, i32 9], ptr addrspace(5) %alloca
1161 tail call fastcc void @void_fastcc_byval_and_stack_passed(ptr addrspace(5) byval([3 x i32]) %alloca, [32 x i32] zeroinitializer, i32 %stack.out.arg)
1165 declare hidden fastcc i64 @i64_fastcc_i64(i64 %arg0)
1167 define hidden fastcc i64 @sibling_call_i64_fastcc_i64(i64 %a) #1 {
1168 ; GCN-LABEL: name: sibling_call_i64_fastcc_i64
1170 ; GCN-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
1172 ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
1173 ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr15
1174 ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr14
1175 ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr13
1176 ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr12
1177 ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr10_sgpr11
1178 ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr8_sgpr9
1179 ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr6_sgpr7
1180 ; GCN-NEXT: [[COPY8:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
1181 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr0
1182 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr1
1183 ; GCN-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY9]](s32), [[COPY10]](s32)
1184 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i64_fastcc_i64
1185 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
1186 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
1187 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
1188 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s64) = COPY [[COPY5]](s64)
1189 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
1190 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
1191 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
1192 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
1193 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
1194 ; GCN-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
1195 ; GCN-NEXT: $vgpr0 = COPY [[UV]](s32)
1196 ; GCN-NEXT: $vgpr1 = COPY [[UV1]](s32)
1197 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
1198 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
1199 ; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY11]](p4)
1200 ; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY12]](p4)
1201 ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[COPY13]](p4)
1202 ; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY14]](s64)
1203 ; GCN-NEXT: $sgpr12 = COPY [[COPY15]](s32)
1204 ; GCN-NEXT: $sgpr13 = COPY [[COPY16]](s32)
1205 ; GCN-NEXT: $sgpr14 = COPY [[COPY17]](s32)
1206 ; GCN-NEXT: $sgpr15 = COPY [[COPY18]](s32)
1207 ; GCN-NEXT: $vgpr31 = COPY [[COPY19]](s32)
1208 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i64_fastcc_i64, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
1210 %ret = tail call fastcc i64 @i64_fastcc_i64(i64 %a)
1214 declare hidden fastcc ptr addrspace(1) @p1i8_fastcc_p1i8(ptr addrspace(1) %arg0) #1
1216 define hidden fastcc ptr addrspace(1) @sibling_call_p1i8_fastcc_p1i8(ptr addrspace(1) %a) #1 {
1217 ; GCN-LABEL: name: sibling_call_p1i8_fastcc_p1i8
1219 ; GCN-NEXT: liveins: $vgpr0, $vgpr1
1221 ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
1222 ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
1223 ; GCN-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
1224 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @p1i8_fastcc_p1i8
1225 ; GCN-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](p1)
1226 ; GCN-NEXT: $vgpr0 = COPY [[UV]](s32)
1227 ; GCN-NEXT: $vgpr1 = COPY [[UV1]](s32)
1228 ; GCN-NEXT: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
1229 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
1230 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @p1i8_fastcc_p1i8, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
1232 %ret = tail call fastcc ptr addrspace(1) @p1i8_fastcc_p1i8(ptr addrspace(1) %a)
1233 ret ptr addrspace(1) %ret
1236 declare hidden fastcc i16 @i16_fastcc_i16(i16 %arg0)
1238 define hidden fastcc i16 @sibling_call_i16_fastcc_i16(i16 %a) #1 {
1239 ; GCN-LABEL: name: sibling_call_i16_fastcc_i16
1241 ; GCN-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr0, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
1243 ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
1244 ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr15
1245 ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr14
1246 ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr13
1247 ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr12
1248 ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr10_sgpr11
1249 ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr8_sgpr9
1250 ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr6_sgpr7
1251 ; GCN-NEXT: [[COPY8:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
1252 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr0
1253 ; GCN-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
1254 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i16_fastcc_i16
1255 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
1256 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
1257 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
1258 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY5]](s64)
1259 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
1260 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
1261 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
1262 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
1263 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
1264 ; GCN-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
1265 ; GCN-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
1266 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
1267 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY19]](<4 x s32>)
1268 ; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY10]](p4)
1269 ; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY11]](p4)
1270 ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[COPY12]](p4)
1271 ; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY13]](s64)
1272 ; GCN-NEXT: $sgpr12 = COPY [[COPY14]](s32)
1273 ; GCN-NEXT: $sgpr13 = COPY [[COPY15]](s32)
1274 ; GCN-NEXT: $sgpr14 = COPY [[COPY16]](s32)
1275 ; GCN-NEXT: $sgpr15 = COPY [[COPY17]](s32)
1276 ; GCN-NEXT: $vgpr31 = COPY [[COPY18]](s32)
1277 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @i16_fastcc_i16, 0, csr_amdgpu, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
1279 %ret = tail call fastcc i16 @i16_fastcc_i16(i16 %a)
1283 declare hidden fastcc half @f16_fastcc_f16(half %arg0)
1285 define hidden fastcc half @sibling_call_f16_fastcc_f16(half %a) #1 {
1286 ; GCN-LABEL: name: sibling_call_f16_fastcc_f16
1288 ; GCN-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr0, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
1290 ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
1291 ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr15
1292 ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr14
1293 ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr13
1294 ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr12
1295 ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr10_sgpr11
1296 ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr8_sgpr9
1297 ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr6_sgpr7
1298 ; GCN-NEXT: [[COPY8:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
1299 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr0
1300 ; GCN-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
1301 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @f16_fastcc_f16
1302 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
1303 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
1304 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
1305 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY5]](s64)
1306 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
1307 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
1308 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
1309 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
1310 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
1311 ; GCN-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
1312 ; GCN-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
1313 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
1314 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY19]](<4 x s32>)
1315 ; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY10]](p4)
1316 ; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY11]](p4)
1317 ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[COPY12]](p4)
1318 ; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY13]](s64)
1319 ; GCN-NEXT: $sgpr12 = COPY [[COPY14]](s32)
1320 ; GCN-NEXT: $sgpr13 = COPY [[COPY15]](s32)
1321 ; GCN-NEXT: $sgpr14 = COPY [[COPY16]](s32)
1322 ; GCN-NEXT: $sgpr15 = COPY [[COPY17]](s32)
1323 ; GCN-NEXT: $vgpr31 = COPY [[COPY18]](s32)
1324 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @f16_fastcc_f16, 0, csr_amdgpu, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
1326 %ret = tail call fastcc half @f16_fastcc_f16(half %a)
1330 declare hidden fastcc <3 x i16> @v3i16_fastcc_v3i16(<3 x i16> %arg0)
1332 define hidden fastcc <3 x i16> @sibling_call_v3i16_fastcc_v3i16(<3 x i16> %a) #1 {
1333 ; GCN-LABEL: name: sibling_call_v3i16_fastcc_v3i16
1335 ; GCN-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
1337 ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
1338 ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr15
1339 ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr14
1340 ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr13
1341 ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr12
1342 ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr10_sgpr11
1343 ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr8_sgpr9
1344 ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr6_sgpr7
1345 ; GCN-NEXT: [[COPY8:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
1346 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
1347 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
1348 ; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY9]](<2 x s16>), [[COPY10]](<2 x s16>)
1349 ; GCN-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s16>)
1350 ; GCN-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16)
1351 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @v3i16_fastcc_v3i16
1352 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
1353 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
1354 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
1355 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s64) = COPY [[COPY5]](s64)
1356 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
1357 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
1358 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
1359 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
1360 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
1361 ; GCN-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s16>)
1362 ; GCN-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
1363 ; GCN-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UV4]](s16), [[UV5]](s16), [[UV6]](s16), [[DEF]](s16)
1364 ; GCN-NEXT: [[UV7:%[0-9]+]]:_(<2 x s16>), [[UV8:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<4 x s16>)
1365 ; GCN-NEXT: $vgpr0 = COPY [[UV7]](<2 x s16>)
1366 ; GCN-NEXT: $vgpr1 = COPY [[UV8]](<2 x s16>)
1367 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
1368 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
1369 ; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY11]](p4)
1370 ; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY12]](p4)
1371 ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[COPY13]](p4)
1372 ; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY14]](s64)
1373 ; GCN-NEXT: $sgpr12 = COPY [[COPY15]](s32)
1374 ; GCN-NEXT: $sgpr13 = COPY [[COPY16]](s32)
1375 ; GCN-NEXT: $sgpr14 = COPY [[COPY17]](s32)
1376 ; GCN-NEXT: $sgpr15 = COPY [[COPY18]](s32)
1377 ; GCN-NEXT: $vgpr31 = COPY [[COPY19]](s32)
1378 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @v3i16_fastcc_v3i16, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
1380 %ret = tail call fastcc <3 x i16> @v3i16_fastcc_v3i16(<3 x i16> %a)
1384 declare hidden fastcc <4 x i16> @v4i16_fastcc_v4i16(<4 x i16> %arg0)
1386 define hidden fastcc <4 x i16> @sibling_call_v4i16_fastcc_v4i16(<4 x i16> %a) #1 {
1387 ; GCN-LABEL: name: sibling_call_v4i16_fastcc_v4i16
1389 ; GCN-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
1391 ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
1392 ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr15
1393 ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr14
1394 ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr13
1395 ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr12
1396 ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr10_sgpr11
1397 ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr8_sgpr9
1398 ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr6_sgpr7
1399 ; GCN-NEXT: [[COPY8:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
1400 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
1401 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
1402 ; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY9]](<2 x s16>), [[COPY10]](<2 x s16>)
1403 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @v4i16_fastcc_v4i16
1404 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
1405 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
1406 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
1407 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s64) = COPY [[COPY5]](s64)
1408 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
1409 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
1410 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
1411 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
1412 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
1413 ; GCN-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s16>)
1414 ; GCN-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
1415 ; GCN-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
1416 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
1417 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
1418 ; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY11]](p4)
1419 ; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY12]](p4)
1420 ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[COPY13]](p4)
1421 ; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY14]](s64)
1422 ; GCN-NEXT: $sgpr12 = COPY [[COPY15]](s32)
1423 ; GCN-NEXT: $sgpr13 = COPY [[COPY16]](s32)
1424 ; GCN-NEXT: $sgpr14 = COPY [[COPY17]](s32)
1425 ; GCN-NEXT: $sgpr15 = COPY [[COPY18]](s32)
1426 ; GCN-NEXT: $vgpr31 = COPY [[COPY19]](s32)
1427 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @v4i16_fastcc_v4i16, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
1429 %ret = tail call fastcc <4 x i16> @v4i16_fastcc_v4i16(<4 x i16> %a)
1433 declare hidden fastcc <2 x i64> @v2i64_fastcc_v2i64(<2 x i64> %arg0)
1435 define hidden fastcc <2 x i64> @sibling_call_v2i64_fastcc_v2i64(<2 x i64> %a) #1 {
1436 ; GCN-LABEL: name: sibling_call_v2i64_fastcc_v2i64
1438 ; GCN-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
1440 ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
1441 ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr15
1442 ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr14
1443 ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr13
1444 ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32(s32) = COPY $sgpr12
1445 ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr10_sgpr11
1446 ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr8_sgpr9
1447 ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr6_sgpr7
1448 ; GCN-NEXT: [[COPY8:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
1449 ; GCN-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr0
1450 ; GCN-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr1
1451 ; GCN-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr2
1452 ; GCN-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr3
1453 ; GCN-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY9]](s32), [[COPY10]](s32)
1454 ; GCN-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY11]](s32), [[COPY12]](s32)
1455 ; GCN-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
1456 ; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @v2i64_fastcc_v2i64
1457 ; GCN-NEXT: [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
1458 ; GCN-NEXT: [[COPY14:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
1459 ; GCN-NEXT: [[COPY15:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
1460 ; GCN-NEXT: [[COPY16:%[0-9]+]]:_(s64) = COPY [[COPY5]](s64)
1461 ; GCN-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
1462 ; GCN-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
1463 ; GCN-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
1464 ; GCN-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
1465 ; GCN-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
1466 ; GCN-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s64>)
1467 ; GCN-NEXT: $vgpr0 = COPY [[UV]](s32)
1468 ; GCN-NEXT: $vgpr1 = COPY [[UV1]](s32)
1469 ; GCN-NEXT: $vgpr2 = COPY [[UV2]](s32)
1470 ; GCN-NEXT: $vgpr3 = COPY [[UV3]](s32)
1471 ; GCN-NEXT: [[COPY22:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
1472 ; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY22]](<4 x s32>)
1473 ; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY13]](p4)
1474 ; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY14]](p4)
1475 ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[COPY15]](p4)
1476 ; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY16]](s64)
1477 ; GCN-NEXT: $sgpr12 = COPY [[COPY17]](s32)
1478 ; GCN-NEXT: $sgpr13 = COPY [[COPY18]](s32)
1479 ; GCN-NEXT: $sgpr14 = COPY [[COPY19]](s32)
1480 ; GCN-NEXT: $sgpr15 = COPY [[COPY20]](s32)
1481 ; GCN-NEXT: $vgpr31 = COPY [[COPY21]](s32)
1482 ; GCN-NEXT: SI_TCRETURN [[GV]](p0), @v2i64_fastcc_v2i64, 0, csr_amdgpu, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
1484 %ret = tail call fastcc <2 x i64> @v2i64_fastcc_v2i64(<2 x i64> %a)
1488 attributes #0 = { nounwind }
1489 attributes #1 = { nounwind noinline "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" }
1491 !llvm.module.flags = !{!0}
1492 !0 = !{i32 1, !"amdhsa_code_object_version", i32 500}