; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -amdgpu-s-branch-bits=4 < %s | FileCheck -check-prefix=GCN %s

; FIXME: We should use llvm-mc for this, but we can't even parse our own output.
; RUN: llc -march=amdgcn -verify-machineinstrs -amdgpu-s-branch-bits=4 -o %t.o -filetype=obj %s
; RUN: llvm-readobj -r %t.o | FileCheck --check-prefix=OBJ %s

; Restrict the maximum branch offset to between +7 and -8 dwords.

; @llvm.amdgcn.s.sleep is used to emit an instruction with a known 4-byte
; size; inline asm is always counted at the maximum instruction size.
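
; A branch whose target lies outside that window (the s_cbranch simm16 counts
; dwords from the instruction after the branch, so -32..+28 bytes here) cannot
; be encoded, and the BranchRelaxation pass rewrites it into an indirect jump.
; As a reference sketch, grounded in the CHECK lines below (the pass needs a
; spare SGPR pair for the target address; in these kernels it ends up in vcc):
;
;   s_getpc_b64 vcc                          ; vcc = address of next instruction
;   s_add_u32 vcc_lo, vcc_lo, DEST-(HERE+4)  ; 64-bit add of the branch distance
;   s_addc_u32 vcc_hi, vcc_hi, 0             ; (HERE+4 is what s_getpc_b64 read)
;   s_setpc_b64 vcc                          ; jump to DEST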
declare void @llvm.amdgcn.s.sleep(i32) #0
declare i32 @llvm.amdgcn.workitem.id.x() #1

; GCN-LABEL: {{^}}uniform_conditional_max_short_forward_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN: s_cmp_eq_u32 [[CND]], 0
; GCN-NEXT: s_cbranch_scc1 [[BB3:BB[0-9]+_[0-9]+]]

; GCN-NEXT: ; %bb.1: ; %bb2
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_sleep 0

; GCN-NEXT: [[BB3]]: ; %bb3
; GCN: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN: buffer_store_dword [[V_CND]]

define amdgpu_kernel void @uniform_conditional_max_short_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
bb:
  %cmp = icmp eq i32 %cnd, 0
  br i1 %cmp, label %bb3, label %bb2 ; +8 dword branch
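
; Size sketch for %bb2 (assuming 8-byte v_nop_e64 and a 4-byte s_sleep):
; 3*8 + 4 = 28 bytes = +7 dwords from the instruction after the s_cbranch,
; i.e. exactly the edge of the encodable range; the "+8 dword" above counts
; the branch instruction itself.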

bb2:
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  call void @llvm.amdgcn.s.sleep(i32 0)
  br label %bb3

bb3:
  store volatile i32 %cnd, i32 addrspace(1)* %arg
  ret void
}

; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN: s_cmp_eq_u32 [[CND]], 0
; GCN-NEXT: s_cbranch_scc0 [[LONGBB:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[ENDBB:BB[0-9]+_[0-9]+]]-([[LONG_JUMP]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[LONGBB]]:
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: [[ENDBB]]:
; GCN: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN: buffer_store_dword [[V_CND]]

define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
bb0:
  %cmp = icmp eq i32 %cnd, 0
  br i1 %cmp, label %bb3, label %bb2 ; +9 dword branch
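
; Here %bb2 is 4 v_nop_e64 = 32 bytes, one dword past the +7 limit, so the
; branch is inverted (scc1 -> scc0 over the expansion) and the fall-through
; block gets the s_getpc/s_setpc indirect jump checked above.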

bb2:
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb3

bb3:
  store volatile i32 %cnd, i32 addrspace(1)* %arg
  ret void
}

; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_vcnd_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN-DAG: v_cmp_eq_f32_e64 [[UNMASKED:s\[[0-9]+:[0-9]+\]]], [[CND]], 0
; GCN-DAG: s_and_b64 vcc, exec, [[UNMASKED]]
; GCN: s_cbranch_vccz [[LONGBB:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[ENDBB:BB[0-9]+_[0-9]+]]-([[LONG_JUMP]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[LONGBB]]:

; GCN: [[ENDBB]]:
; GCN: buffer_store_dword [[V_CND]]

define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(float addrspace(1)* %arg, float %cnd) #0 {
bb0:
  %cmp = fcmp oeq float %cnd, 0.0
  br i1 %cmp, label %bb3, label %bb2 ; + 8 dword branch

bb2:
  call void asm sideeffect " ; 32 bytes
  v_nop_e64
  v_nop_e64
  v_nop_e64
  v_nop_e64", ""() #0
  br label %bb3

bb3:
  store volatile float %cnd, float addrspace(1)* %arg
  ret void
}

; GCN-LABEL: {{^}}min_long_forward_vbranch:

; GCN: buffer_load_dword
; GCN: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
; GCN: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], vcc

; GCN: s_or_b64 exec, exec, [[SAVE]]
; GCN: buffer_store_dword

define amdgpu_kernel void @min_long_forward_vbranch(i32 addrspace(1)* %arg) #0 {
bb:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = zext i32 %tid to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tid.ext
  %load = load volatile i32, i32 addrspace(1)* %gep
  %cmp = icmp eq i32 %load, 0
  br i1 %cmp, label %bb3, label %bb2 ; + 8 dword branch

bb2:
  call void asm sideeffect " ; 32 bytes
  v_nop_e64
  v_nop_e64
  v_nop_e64
  v_nop_e64", ""() #0
  br label %bb3

bb3:
  store volatile i32 %load, i32 addrspace(1)* %gep
  ret void
}
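
; For a backward branch the offset is negative: the expansion below computes
; the target with a 64-bit subtract (s_sub_u32/s_subb_u32) from the
; s_getpc_b64 result instead of an add.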

; GCN-LABEL: {{^}}long_backward_sbranch:
; GCN: s_mov_b32 [[LOOPIDX:s[0-9]+]], 0{{$}}

; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]: ; %bb2
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_add_i32 [[INC:s[0-9]+]], [[LOOPIDX]], 1
; GCN-NEXT: s_cmp_lt_i32 [[INC]], 10

; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: s_cbranch_scc0 [[ENDBB:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb2
; GCN-NEXT: ; in Loop: Header=[[LOOPBB]] Depth=1
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_sub_u32 vcc_lo, vcc_lo, ([[LONG_JUMP]]+4)-[[LOOPBB]]
; GCN-NEXT: s_subb_u32 vcc_hi, vcc_hi, 0
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[ENDBB]]:

define amdgpu_kernel void @long_backward_sbranch(i32 addrspace(1)* %arg) #0 {
bb:
  br label %bb2

bb2:
  %loop.idx = phi i32 [ 0, %bb ], [ %inc, %bb2 ]
  ; 24 bytes
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  %inc = add nsw i32 %loop.idx, 1 ; add cost 4
  %cmp = icmp slt i32 %inc, 10 ; condition cost = 8
  br i1 %cmp, label %bb2, label %bb3 ; -9 dword branch

bb3:
  ret void
}

; Requires expansion of the unconditional branch from %bb2 to %bb4 (and
; expansion of the conditional branch from %bb to %bb3).
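;
; Expanding the unconditional branch inserts the ~20 byte
; s_getpc/s_add/s_addc/s_setpc sequence into %bb2, which in turn pushes %bb3
; out of range of the conditional branch at the end of %bb.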

; GCN-LABEL: {{^}}uniform_unconditional_min_long_forward_branch:
; GCN: s_cmp_eq_u32
; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP0:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB3:BB[0-9]_[0-9]+]]-([[LONG_JUMP0]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[BB2]]: ; %bb2
; GCN: v_mov_b32_e32 [[BB2_K:v[0-9]+]], 17
; GCN: buffer_store_dword [[BB2_K]]

; GCN-NEXT: [[LONG_JUMP1:BB[0-9]+_[0-9]+]]: ; %bb2
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB4:BB[0-9]_[0-9]+]]-([[LONG_JUMP1]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc

; GCN: [[BB3]]: ; %bb3
; GCN: ;;#ASMSTART
; GCN: v_nop_e64
; GCN: ;;#ASMEND
; GCN-NEXT: [[BB4]]: ; %bb4
; GCN: v_mov_b32_e32 [[BB4_K:v[0-9]+]], 63
; GCN: buffer_store_dword [[BB4_K]]

; GCN: s_endpgm
; GCN-NEXT: .Lfunc_end{{[0-9]+}}:
define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
bb0:
  %tmp = icmp ne i32 %arg1, 0
  br i1 %tmp, label %bb2, label %bb3

bb2:
  store volatile i32 17, i32 addrspace(1)* undef
  br label %bb4

bb3:
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb4

bb4:
  store volatile i32 63, i32 addrspace(1)* %arg
  ret void
}

; GCN-LABEL: {{^}}uniform_unconditional_min_long_backward_branch:
; GCN-NEXT: ; %bb.0: ; %entry

; GCN-NEXT: [[LOOP:BB[0-9]_[0-9]+]]: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop
; GCN-NEXT: ; in Loop: Header=[[LOOP]] Depth=1
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_sub_u32 vcc_lo, vcc_lo, ([[LONGBB]]+4)-[[LOOP]]
; GCN-NEXT: s_subb_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc
; GCN-NEXT: .Lfunc_end{{[0-9]+}}:
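
; The unconditional backward branch cannot be inverted like the conditional
; cases above, so the out-of-range s_branch itself is replaced in-line by the
; s_getpc/s_sub/s_setpc sequence.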
define amdgpu_kernel void @uniform_unconditional_min_long_backward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
entry:
  br label %loop

loop:
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %loop
}

; Expansion of the branch from %bb1 to %bb3 introduces the need to expand the
; branch from %bb0 to %bb2.
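;
; Relaxing the %bb1 -> %bb3 branch grows %bb1 by the ~20 byte indirect-jump
; sequence, which pushes %bb2 out of range of the branch at the end of %bb0.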

; GCN-LABEL: {{^}}expand_requires_expand:
; GCN-NEXT: ; %bb.0: ; %bb0

; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 0{{$}}
; GCN-NEXT: s_cbranch_scc0 [[BB1:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONGBB0:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB2:BB[0-9]_[0-9]+]]-([[LONGBB0]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[BB1]]: ; %bb1
; GCN-NEXT: s_load_dword
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_cmp_eq_u32 s{{[0-9]+}}, 3{{$}}
; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]_[0-9]+]]

; GCN-NEXT: [[LONGBB1:BB[0-9]+_[0-9]+]]: ; %bb1
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB3:BB[0-9]+_[0-9]+]]-([[LONGBB1]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[BB2]]: ; %bb2
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: [[BB3]]: ; %bb3
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

define amdgpu_kernel void @expand_requires_expand(i32 %cond0) #0 {
bb0:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
  %cmp0 = icmp slt i32 %cond0, 0
  br i1 %cmp0, label %bb2, label %bb1

bb1:
  %val = load volatile i32, i32 addrspace(4)* undef
  %cmp1 = icmp eq i32 %val, 3
  br i1 %cmp1, label %bb3, label %bb2

bb2:
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb3

bb3:
; These NOPs prevent tail-duplication-based outlining
; from firing, which defeats the need to expand the branches and this test.
  call void asm sideeffect "v_nop_e64", ""() #0
  call void asm sideeffect "v_nop_e64", ""() #0
  ret void
}

; Requires expansion of the required skip branch.
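;
; A divergent if emits a "mask branch" that skips the body when exec is zero.
; That skip branch has the same range limit, so here it is inverted to
; s_cbranch_execnz into %if and the fall-through block gets the indirect jump.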

; GCN-LABEL: {{^}}uniform_inside_divergent:
; GCN: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GCN-NEXT: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9]+_[0-9]+]]
; GCN-NEXT: s_cbranch_execnz [[IF:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %entry
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB2:BB[0-9]_[0-9]+]]-([[LONGBB]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[IF]]: ; %if
; GCN: buffer_store_dword
; GCN: s_cbranch_scc1 [[ENDIF]]

; GCN-NEXT: ; %bb.2: ; %if_uniform
; GCN: buffer_store_dword

; GCN-NEXT: [[ENDIF]]: ; %endif
; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
; GCN-NEXT: s_sleep 5

define amdgpu_kernel void @uniform_inside_divergent(i32 addrspace(1)* %out, i32 %cond) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %d_cmp = icmp ult i32 %tid, 16
  br i1 %d_cmp, label %if, label %endif

if:
  store i32 0, i32 addrspace(1)* %out
  %u_cmp = icmp eq i32 %cond, 0
  br i1 %u_cmp, label %if_uniform, label %endif

if_uniform:
  store i32 1, i32 addrspace(1)* %out
  br label %endif

endif:
; Layout can remove the split branch if it can copy the return block.
; This call makes the return block long enough that it doesn't get copied.
  call void @llvm.amdgcn.s.sleep(i32 5)
  ret void
}
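
; Here the divergent control flow is lowered with mask-branch pseudos; the
; branch that actually needs relaxation is the backward loop branch, expanded
; with the s_sub_u32 form of the sequence.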

; GCN-LABEL: {{^}}analyze_mask_branch:
; GCN: v_cmp_nlt_f32_e32 vcc
; GCN-NEXT: s_and_saveexec_b64 [[TEMP_MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: s_xor_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec, [[TEMP_MASK]]
; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]

; GCN: [[FLOW]]: ; %Flow
; GCN-NEXT: s_or_saveexec_b64 [[TEMP_MASK1:s\[[0-9]+:[0-9]+\]]], [[MASK]]
; GCN-NEXT: s_xor_b64 exec, exec, [[TEMP_MASK1]]
; GCN-NEXT: ; mask branch [[RET:BB[0-9]+_[0-9]+]]

; GCN: [[LOOP_BODY:BB[0-9]+_[0-9]+]]: ; %loop

; GCN: s_cbranch_vccz [[RET]]

; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop
; GCN-NEXT: ; in Loop: Header=[[LOOP_BODY]] Depth=1
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_sub_u32 vcc_lo, vcc_lo, ([[LONGBB]]+4)-[[LOOP_BODY]]
; GCN-NEXT: s_subb_u32 vcc_hi, vcc_hi, 0
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[RET]]: ; %UnifiedReturnBlock

define amdgpu_kernel void @analyze_mask_branch() #0 {
entry:
  %reg = call float asm sideeffect "v_mov_b32_e64 $0, 0", "=v"()
  %cmp0 = fcmp ogt float %reg, 0.000000e+00
  br i1 %cmp0, label %loop, label %ret

loop:
  %phi = phi float [ 0.000000e+00, %loop_body ], [ 1.000000e+00, %entry ]
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64", ""() #0
  %cmp1 = fcmp olt float %phi, 8.0
  br i1 %cmp1, label %loop_body, label %ret

loop_body:
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %loop

ret:
  store volatile i32 7, i32 addrspace(1)* undef
  ret void
}

; GCN-LABEL: {{^}}long_branch_hang:
; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 6
; GCN: s_cbranch_scc1 {{BB[0-9]+_[0-9]+}}
; GCN-NEXT: s_branch [[LONG_BR_0:BB[0-9]+_[0-9]+]]
; GCN-NEXT: BB{{[0-9]+_[0-9]+}}:

; GCN: s_add_u32 vcc_lo, vcc_lo, [[LONG_BR_DEST0:BB[0-9]+_[0-9]+]]-(
; GCN: s_setpc_b64

; GCN-NEXT: [[LONG_BR_0]]:
; GCN-DAG: v_cmp_lt_i32
; GCN-DAG: v_cmp_gt_i32
; GCN: s_cbranch_vccnz

; GCN: [[LONG_BR_DEST0]]
; GCN: s_cbranch_vccz

define amdgpu_kernel void @long_branch_hang(i32 addrspace(1)* nocapture %arg, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i64 %arg5) #0 {
bb:
  %tmp = icmp slt i32 %arg2, 9
  %tmp6 = icmp eq i32 %arg1, 0
  %tmp7 = icmp sgt i32 %arg4, 0
  %tmp8 = icmp sgt i32 %arg4, 5
  br i1 %tmp8, label %bb9, label %bb13

bb9:                                              ; preds = %bb
  %tmp10 = and i1 %tmp7, %tmp
  %tmp11 = icmp slt i32 %arg3, %arg4
  %tmp12 = or i1 %tmp11, %tmp7
  br i1 %tmp12, label %bb19, label %bb14

bb13:                                             ; preds = %bb
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br i1 %tmp6, label %bb19, label %bb14

bb14:                                             ; preds = %bb13, %bb9
  %tmp15 = icmp slt i32 %arg3, %arg4
  %tmp16 = or i1 %tmp15, %tmp
  %tmp17 = and i1 %tmp6, %tmp16
  %tmp18 = zext i1 %tmp17 to i32
  br label %bb19

bb19:                                             ; preds = %bb14, %bb13, %bb9
  %tmp20 = phi i32 [ undef, %bb9 ], [ undef, %bb13 ], [ %tmp18, %bb14 ]
  %tmp21 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %arg5
  store i32 %tmp20, i32 addrspace(1)* %tmp21, align 4
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }