1 ; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SIVI,FUNC %s
2 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SIVI,FUNC %s
3 ; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,FUNC %s
4 ; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10,FUNC %s
5 ; RUN: llc -march=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10,FUNC %s
7 ; FUNC-LABEL: {{^}}s_add_i32:
8 ; GCN: s_add_i32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}}
9 ; GCN: v_mov_b32_e32 v[[V_REG:[0-9]+]], s[[REG]]
10 ; GCN: buffer_store_{{dword|b32}} v[[V_REG]],
; Scalar i32 add: both operands are uniform loads, so the add is selected
; onto the SALU (s_add_i32) and the result is copied to a VGPR for the store.
define amdgpu_kernel void @s_add_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
  %b_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
  %a = load i32, ptr addrspace(1) %in
  %b = load i32, ptr addrspace(1) %b_ptr
  %result = add i32 %a, %b
  store i32 %result, ptr addrspace(1) %out
  ret void
}
20 ; FUNC-LABEL: {{^}}s_add_v2i32:
21 ; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
22 ; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; <2 x i32> add of uniform operands: expect the vector to be scalarized into
; two independent s_add_i32 instructions.
define amdgpu_kernel void @s_add_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
  %b_ptr = getelementptr <2 x i32>, ptr addrspace(1) %in, i32 1
  %a = load <2 x i32>, ptr addrspace(1) %in
  %b = load <2 x i32>, ptr addrspace(1) %b_ptr
  %result = add <2 x i32> %a, %b
  store <2 x i32> %result, ptr addrspace(1) %out
  ret void
}
32 ; FUNC-LABEL: {{^}}s_add_v4i32:
33 ; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
34 ; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
35 ; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
36 ; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; <4 x i32> add of uniform operands: expect scalarization into four
; s_add_i32 instructions.
define amdgpu_kernel void @s_add_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
  %b_ptr = getelementptr <4 x i32>, ptr addrspace(1) %in, i32 1
  %a = load <4 x i32>, ptr addrspace(1) %in
  %b = load <4 x i32>, ptr addrspace(1) %b_ptr
  %result = add <4 x i32> %a, %b
  store <4 x i32> %result, ptr addrspace(1) %out
  ret void
}
46 ; FUNC-LABEL: {{^}}s_add_v8i32:
; <8 x i32> add with both operands passed as kernel arguments (all-SGPR inputs).
define amdgpu_kernel void @s_add_v8i32(ptr addrspace(1) %out, <8 x i32> %a, <8 x i32> %b) {
entry:
  %0 = add <8 x i32> %a, %b
  store <8 x i32> %0, ptr addrspace(1) %out
  ret void
}
62 ; FUNC-LABEL: {{^}}s_add_v16i32:
; <16 x i32> add with both operands passed as kernel arguments (all-SGPR inputs).
define amdgpu_kernel void @s_add_v16i32(ptr addrspace(1) %out, <16 x i32> %a, <16 x i32> %b) {
entry:
  %0 = add <16 x i32> %a, %b
  store <16 x i32> %0, ptr addrspace(1) %out
  ret void
}
86 ; FUNC-LABEL: {{^}}v_add_i32:
87 ; GCN: {{buffer|flat|global}}_load_{{dword|b32}} [[A:v[0-9]+]]
88 ; GCN: {{buffer|flat|global}}_load_{{dword|b32}} [[B:v[0-9]+]]
89 ; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc, [[A]], [[B]]
90 ; GFX9: v_add_u32_e32 v{{[0-9]+}}, [[A]], [[B]]
91 ; GFX10: v_add_nc_u32_e32 v{{[0-9]+}}, [[A]], [[B]]
; Divergent i32 add: the loads are indexed by workitem id (volatile so they
; are not combined), forcing a VALU add. Opcode differs per generation:
; v_add_i32/v_add_u32 with carry (SI/VI), v_add_u32 (GFX9), v_add_nc_u32 (GFX10+).
define amdgpu_kernel void @v_add_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, ptr addrspace(1) %in, i32 %tid
  %b_ptr = getelementptr i32, ptr addrspace(1) %gep, i32 1
  %a = load volatile i32, ptr addrspace(1) %gep
  %b = load volatile i32, ptr addrspace(1) %b_ptr
  %result = add i32 %a, %b
  store i32 %result, ptr addrspace(1) %out
  ret void
}
103 ; FUNC-LABEL: {{^}}v_add_imm_i32:
104 ; GCN: {{buffer|flat|global}}_load_{{dword|b32}} [[A:v[0-9]+]]
105 ; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc, 0x7b, [[A]]
106 ; GFX9: v_add_u32_e32 v{{[0-9]+}}, 0x7b, [[A]]
107 ; GFX10: v_add_nc_u32_e32 v{{[0-9]+}}, 0x7b, [[A]]
; Divergent add with an immediate (123 = 0x7b) that does not fit the inline
; constant range, so it stays as a literal operand of the VALU add.
define amdgpu_kernel void @v_add_imm_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, ptr addrspace(1) %in, i32 %tid
  %b_ptr = getelementptr i32, ptr addrspace(1) %gep, i32 1
  %a = load volatile i32, ptr addrspace(1) %gep
  %result = add i32 %a, 123
  store i32 %result, ptr addrspace(1) %out
  ret void
}
118 ; FUNC-LABEL: {{^}}add64:
; Uniform i64 add: expect an s_add_u32 / s_addc_u32 carry-chain pair.
define amdgpu_kernel void @add64(ptr addrspace(1) %out, i64 %a, i64 %b) {
entry:
  %add = add i64 %a, %b
  store i64 %add, ptr addrspace(1) %out
  ret void
}
128 ; The v_addc_u32 and v_add_i32 instruction can't read SGPRs, because they
129 ; use VCC. The test is designed so that %a will be stored in an SGPR and
130 ; %0 will be stored in a VGPR, so the compiler will be forced to copy %a
131 ; to a VGPR before doing the add.
133 ; FUNC-LABEL: {{^}}add64_sgpr_vgpr:
134 ; GCN-NOT: v_addc_u32_e32 s
; i64 add of an SGPR argument (%a) and a VGPR load (%0); %a must be copied
; to VGPRs before the carry-using VALU add (see comment block above).
define amdgpu_kernel void @add64_sgpr_vgpr(ptr addrspace(1) %out, i64 %a, ptr addrspace(1) %in) {
entry:
  %0 = load i64, ptr addrspace(1) %in
  %1 = add i64 %a, %0
  store i64 %1, ptr addrspace(1) %out
  ret void
}
143 ; Test i64 add inside a branch.
144 ; FUNC-LABEL: {{^}}add64_in_branch:
; i64 add performed on only one side of a branch; the two paths merge in a phi.
define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(1) %in, i64 %a, i64 %b, i64 %c) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i64, ptr addrspace(1) %in
  br label %endif

else:
  %2 = add i64 %a, %b
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, ptr addrspace(1) %out
  ret void
}
166 ; Make sure the VOP3 form of add is initially selected. Otherwise pair
167 ; of copies from/to VCC would be necessary
169 ; GCN-LABEL: {{^}}add_select_vop3:
170 ; SI: v_add_i32_e64 v0, s[0:1], s0, v0
171 ; VI: v_add_u32_e64 v0, s[0:1], s0, v0
172 ; GFX9: v_add_u32_e32 v0, s0, v0
173 ; GFX10: v_add_nc_u32_e32 v0, s0, v0
176 ; GCN: ds_{{write|store}}_b32
; VCC is reserved by the inline asm across the add, so on SI/VI the add must
; be selected in its VOP3 (explicit carry-out SGPR) form rather than _e32.
define amdgpu_ps void @add_select_vop3(i32 inreg %s, i32 %v) {
  %vcc = call i64 asm sideeffect "; def vcc", "={vcc}"()
  %sub = add i32 %v, %s
  store i32 %sub, ptr addrspace(3) undef
  call void asm sideeffect "; use vcc", "{vcc}"(i64 %vcc)
  ret void
}
186 declare i32 @llvm.amdgcn.workitem.id.x() #1
188 attributes #0 = { nounwind }
189 attributes #1 = { nounwind readnone speculatable }