; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; XXX - Why does it like to use vcc?

; GCN-LABEL: {{^}}spill_m0:

; GCN-DAG: s_cmp_lg_u32

; TOVGPR-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], 2

; TOVMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVMEM-DAG: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]]
; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:12 ; 4-byte Folded Spill

; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]]

; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], 2
; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]

; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:12 ; 4-byte Folded Reload
; TOVMEM: s_waitcnt vmcnt(0)
; TOVMEM: v_readfirstlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]]
; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]

; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
; Forces m0 to be live across a divergent branch so the spiller must save and
; restore it (either to a VGPR lane or to scratch memory, per the RUN lines).
define amdgpu_kernel void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
  ; Pin a value into m0 via inline asm so the register allocator cannot
  ; rematerialize it and is forced to spill across the branch.
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
  %cmp0 = icmp eq i32 %cond, 0
  br i1 %cmp0, label %if, label %endif

if:                                               ; preds = %entry
  ; Clobber-free filler; keeps the branch non-trivial so it is not folded away.
  call void asm sideeffect "v_nop", ""() #0
  br label %endif

endif:                                            ; preds = %if, %entry
  ; Consume the reloaded m0 value so the restore is observable in the output.
  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{m0}"(i32 %m0) #0
  store i32 %foo, i32 addrspace(1)* %out
  ret void
}
; LDS array read by @spill_kill_m0_lds's %if block.
@lds = internal addrspace(3) global [64 x float] undef
; m0 is killed, so it isn't necessary during the entry block spill to preserve it
; GCN-LABEL: {{^}}spill_kill_m0_lds:
; GCN: s_mov_b32 m0, s6
; GCN: v_interp_mov_f32

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
; m0 is dead after the entry block's interp use, so no spill/restore of m0
; should be emitted around the branch (checked by the GCN-NOT lines above).
define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %m0) #0 {
main_body:
  %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  ; Load from LDS under WQM so the %if path has a distinct value source.
  %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
  %lds_data_ = load float, float addrspace(3)* %lds_ptr
  %lds_data = call float @llvm.amdgcn.wqm.f32(float %lds_data_)
  br label %endif

else:                                             ; preds = %main_body
  %interp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  br label %endif

endif:                                            ; preds = %else, %if
  %export = phi float [ %lds_data, %if ], [ %interp, %else ]
  %tmp4 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %export, float %export)
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp4, <2 x half> %tmp4, i1 true, i1 true) #0
  ret void
}
; AMDGPU intrinsic declarations used by the tests above.
declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare float @llvm.amdgcn.wqm.f32(float) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }