; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
; Scalar usubo whose carry bit is zero-extended and folded back into the result.
; FUNC-LABEL: {{^}}s_usubo_i64_zext:
; GCN: v_cmp_gt_u64_e32 vcc
define amdgpu_kernel void @s_usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) #0
  %val = extractvalue { i64, i1 } %usub, 0
  %carry = extractvalue { i64, i1 } %usub, 1
  %ext = zext i1 %carry to i64
  %add2 = add i64 %val, %ext
  store i64 %add2, i64 addrspace(1)* %out, align 8
  ret void
}
; FIXME: Could do scalar

; Scalar i32 usubo: result and carry are stored separately.
; FUNC-LABEL: {{^}}s_usubo_i32:
; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
define amdgpu_kernel void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) #0 {
  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %usub, 0
  %carry = extractvalue { i32, i1 } %usub, 1
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
; Vector (per-lane) i32 usubo; operands are loaded from workitem-indexed slots.
; FUNC-LABEL: {{^}}v_usubo_i32:
; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
define amdgpu_kernel void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  ; Index the input arrays by the (sign-extended) workitem id; the original
  ; paste had dropped the GEP index operand, leaving %tid.ext dead.
  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
  %a = load i32, i32 addrspace(1)* %a.gep, align 4
  %b = load i32, i32 addrspace(1)* %b.gep, align 4
  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %usub, 0
  %carry = extractvalue { i32, i1 } %usub, 1
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
; Same as v_usubo_i32, but the inline asm clobbers VCC between the two stores,
; forcing the carry to survive without the VCC register.
; FUNC-LABEL: {{^}}v_usubo_i32_novcc:
; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
define amdgpu_kernel void @v_usubo_i32_novcc(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  ; Index by workitem id (index operand restored; it was dropped in the paste).
  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
  %a = load i32, i32 addrspace(1)* %a.gep, align 4
  %b = load i32, i32 addrspace(1)* %b.gep, align 4
  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %usub, 0
  %carry = extractvalue { i32, i1 } %usub, 1
  store volatile i32 %val, i32 addrspace(1)* %out, align 4
  call void asm sideeffect "", "~{VCC}"() #0
  store volatile i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
; Scalar i64 usubo.
; FUNC-LABEL: {{^}}s_usubo_i64:
define amdgpu_kernel void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) #0 {
  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue { i64, i1 } %usub, 0
  %carry = extractvalue { i64, i1 } %usub, 1
  store i64 %val, i64 addrspace(1)* %out, align 8
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
; Vector (per-lane) i64 usubo.
; FUNC-LABEL: {{^}}v_usubo_i64:
define amdgpu_kernel void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %a.ptr, i64 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  ; Index by workitem id (index operand restored; it was dropped in the paste).
  %a.gep = getelementptr inbounds i64, i64 addrspace(1)* %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i64, i64 addrspace(1)* %b.ptr, i64 %tid.ext
  %a = load i64, i64 addrspace(1)* %a.gep
  %b = load i64, i64 addrspace(1)* %b.gep
  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue { i64, i1 } %usub, 0
  %carry = extractvalue { i64, i1 } %usub, 1
  store i64 %val, i64 addrspace(1)* %out, align 8
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
; Vector (per-lane) i16 usubo; VI has native 16-bit compares.
; FUNC-LABEL: {{^}}v_usubo_i16:
; VI: v_cmp_gt_u16_e32
define amdgpu_kernel void @v_usubo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  ; Index by workitem id (index operand restored; it was dropped in the paste).
  %a.gep = getelementptr inbounds i16, i16 addrspace(1)* %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, i16 addrspace(1)* %b.ptr, i64 %tid.ext
  %a = load i16, i16 addrspace(1)* %a.gep
  %b = load i16, i16 addrspace(1)* %b.gep
  %usub = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
  %val = extractvalue { i16, i1 } %usub, 0
  %carry = extractvalue { i16, i1 } %usub, 1
  store i16 %val, i16 addrspace(1)* %out
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
; Intrinsic declarations used by the kernels above.
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare { i16, i1 } @llvm.usub.with.overflow.i16(i16, i16) #1
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) #1
declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }