; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=TOVMEM -check-prefix=GCN %s
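
; This test covers both SGPR-spilling strategies for m0. With
; -amdgpu-spill-sgpr-to-vgpr=1 the saved copy of m0 is written into a lane of
; a VGPR with v_writelane_b32 and recovered with v_readlane_b32 (the TOVGPR
; prefix). With -amdgpu-spill-sgpr-to-vgpr=0 the lane is additionally stored
; out to scratch memory with buffer_store_dword and reloaded with
; buffer_load_dword (the TOVMEM prefix).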

; XXX - Why does it like to use vcc?

; GCN-LABEL: {{^}}spill_m0:

; GCN: #ASMSTART
; GCN-NEXT: s_mov_b32 m0, 0
; GCN-NEXT: #ASMEND
; GCN-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0

; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], [[M0_LANE:[0-9]+]]

; TOVMEM: s_mov_b64 [[COPY_EXEC:s\[[0-9]+:[0-9]+\]]], exec
; TOVMEM: s_mov_b64 exec, 1
; TOVMEM: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], 0
; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4 ; 4-byte Folded Spill
; TOVMEM: s_mov_b64 exec, [[COPY_EXEC]]
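
; For the to-memory path only lane 0 of the spill VGPR holds the saved value,
; so the expansion saves the exec mask, switches to a single active lane with
; s_mov_b64 exec, 1, performs the writelane and the scratch store, and then
; restores the original exec mask, making the spill independent of which
; lanes happen to be active.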

; GCN: s_cbranch_scc1 [[ENDIF:.LBB[0-9]+_[0-9]+]]

; GCN: [[ENDIF]]:
; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], [[M0_LANE]]
; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]

; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4 ; 4-byte Folded Reload
; TOVMEM: s_waitcnt vmcnt(0)
; TOVMEM: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]], 0
; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]
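
; The s_waitcnt vmcnt(0) is needed between the reload and the readlane: the
; buffer_load_dword result must have arrived in the VGPR before one of its
; lanes can be copied back into an SGPR.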

; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
define amdgpu_kernel void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
  %cmp0 = icmp eq i32 %cond, 0
  br i1 %cmp0, label %if, label %endif

if:
  call void asm sideeffect "v_nop", ""() #0
  br label %endif

endif:
  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{m0}"(i32 %m0) #0
  store i32 %foo, i32 addrspace(1)* %out
  ret void
}
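
; The "={m0}" output constraint pins the defined value to m0 itself, and the
; "{m0}" input constraint forces that value back into m0 at the final
; s_add_i32, so the value has to be copied out of m0, spilled across the
; branch, and restored, which is exactly the sequence the checks above
; describe.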

@lds = internal addrspace(3) global [64 x float] undef

; m0 is killed, so it isn't necessary to preserve it during the spill in the
; entry block.
; GCN-LABEL: {{^}}spill_kill_m0_lds:

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %m0) #0 {
main_body:
  %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
  %lds_data_ = load float, float addrspace(3)* %lds_ptr
  %lds_data = call float @llvm.amdgcn.wqm.f32(float %lds_data_)
  br label %endif

else:                                             ; preds = %main_body
  %interp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  br label %endif

endif:                                            ; preds = %else, %if
  %export = phi float [ %lds_data, %if ], [ %interp, %else ]
  %tmp4 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %export, float %export)
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp4, <2 x half> %tmp4, i1 true, i1 true) #0
  ret void
}
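
; Because the copy out of m0 kills it, the spill inserted in the entry block
; does not also have to save and restore m0 itself; the GCN-NOT lines above
; check that no readlane into m0 and no buffer traffic on m0 is emitted.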

; Force save and restore of m0 during SMEM spill
; GCN-LABEL: {{^}}m0_unavailable_spill:
; GCN: s_load_dword [[REG0:s[0-9]+]], s[0:1], {{0x[0-9]+}}

; GCN: ; def m0, 1

; GCN: s_mov_b32 m0, [[REG0]]
; GCN: v_interp_mov_f32

; GCN: ; clobber m0

; TOSMEM: s_mov_b32 s2, m0
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
; TOSMEM: s_mov_b32 m0, s2

; TOSMEM: s_mov_b64 exec,
; TOSMEM: s_cbranch_execz

; TOSMEM: BB{{[0-9]+_[0-9]+}}:
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define amdgpu_kernel void @m0_unavailable_spill(i32 %m0.arg) #0 {
main_body:
  %m0 = call i32 asm sideeffect "; def $0, 1", "={m0}"() #0
  %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0.arg)
  call void asm sideeffect "; clobber $0", "~{m0}"() #0
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  store volatile i32 8, i32 addrspace(1)* undef
  br label %endif

else:                                             ; preds = %main_body
  store volatile i32 11, i32 addrspace(1)* undef
  br label %endif

endif:                                            ; preds = %else, %if
  ret void
}
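
; The first inline asm defines a value directly in m0 and the second clobbers
; it, so m0 is not available as a scratch register while that value is live.
; Note that no RUN line above enables the TOSMEM prefix, so those checks
; (copy m0 aside, form the spill offset in m0, issue the SMEM access, copy
; the saved value back) are currently not exercised.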

; GCN-LABEL: {{^}}restore_m0_lds:
; FIXME: RegScavenger::isRegUsed() always returns true if m0 is reserved, so we have to save and restore it
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, {{0x[0-9]+}}
; TOSMEM: s_buffer_store_dword s1, s[88:91], m0 ; 4-byte Folded Spill
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_load_dwordx2 [[REG:s\[[0-9]+:[0-9]+\]]]
; TOSMEM: s_add_u32 m0, s3, {{0x[0-9]+}}
; TOSMEM: s_waitcnt lgkmcnt(0)
; TOSMEM: s_buffer_store_dwordx2 [[REG]], s[88:91], m0 ; 8-byte Folded Spill
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_cmp_eq_u32
; TOSMEM: s_cbranch_scc1

; TOSMEM: s_mov_b32 m0, -1

; TOSMEM: s_mov_b32 s2, m0
; TOSMEM: s_add_u32 m0, s3, 0x200
; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
; TOSMEM: s_mov_b32 m0, s2
; TOSMEM: s_waitcnt lgkmcnt(0)

; TOSMEM: ds_write_b64

; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_buffer_load_dword s2, s[88:91], m0 ; 4-byte Folded Reload
; FIXME-TOSMEM-NOT: m0

; TOSMEM: s_mov_b32 [[REG1:s[0-9]+]], m0
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
; TOSMEM: s_mov_b32 m0, [[REG1]]
; TOSMEM: s_mov_b32 m0, -1

; TOSMEM: s_waitcnt lgkmcnt(0)

; TOSMEM: s_mov_b32 m0, s2

; TOSMEM: s_dcache_wb
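
; The recurring TOSMEM pattern (copy m0 to a free SGPR, s_add_u32 the spill
; slot offset into m0, issue the s_buffer access, copy the saved value back)
; is the save/restore the FIXME above refers to: because m0 is reserved, the
; scavenger treats it as used, so it has to be preserved around every SMEM
; spill that needs it as the offset register.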
define amdgpu_kernel void @restore_m0_lds(i32 %arg) {
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
  %sval = load volatile i64, i64 addrspace(4)* undef
  %cmp = icmp eq i32 %arg, 0
  br i1 %cmp, label %ret, label %bb

bb:
  store volatile i64 %sval, i64 addrspace(3)* undef
  call void asm sideeffect "; use $0", "{m0}"(i32 %m0) #0
  br label %ret

ret:
  ret void
}
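
; The volatile i64 load and the asm use of %m0 keep both %sval and the value
; pinned in m0 live across the branch, so at -O0 an SGPR pair and the m0 copy
; both have to be spilled in the entry block and reloaded in %bb.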

declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare float @llvm.amdgcn.wqm.f32(float) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }