; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-dpp-combine -verify-machineinstrs < %s | FileCheck %s

; VOP2 with literal cannot be combined
; CHECK-LABEL: {{^}}dpp_combine_i32_literal:
; CHECK: v_mov_b32_dpp [[OLD:v[0-9]+]], {{v[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x2 bank_mask:0x1 bound_ctrl:0
; CHECK: v_add_u32_e32 {{v[0-9]+}}, vcc, 42, [[OLD]]
define amdgpu_kernel void @dpp_combine_i32_literal(i32 addrspace(1)* %out, i32 %in) {
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 undef, i32 %in, i32 1, i32 2, i32 1, i1 1) #0
  %res = add nsw i32 %dpp, 42
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

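; old is undef and bound_ctrl:0 is requested, so the add folds into the DPP mov.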
; CHECK-LABEL: {{^}}dpp_combine_i32_bz:
; CHECK: v_add_u32_dpp {{v[0-9]+}}, vcc, {{v[0-9]+}}, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
define amdgpu_kernel void @dpp_combine_i32_bz(i32 addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 undef, i32 %in, i32 1, i32 1, i32 1, i1 1) #0
  %res = add nsw i32 %dpp, %x
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

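; bound_ctrl is off but old is undef, so the combine is still legal; no bound_ctrl:0 appears on the result.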
; CHECK-LABEL: {{^}}dpp_combine_i32_boff_undef:
; CHECK: v_add_u32_dpp {{v[0-9]+}}, vcc, {{v[0-9]+}}, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
define amdgpu_kernel void @dpp_combine_i32_boff_undef(i32 addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 undef, i32 %in, i32 1, i32 1, i32 1, i1 0) #0
  %res = add nsw i32 %dpp, %x
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

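; bound_ctrl is off and old is 0; bound_ctrl:0 makes inactive lanes read 0 anyway, so the combiner can emit it and drop old.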
; CHECK-LABEL: {{^}}dpp_combine_i32_boff_0:
; CHECK: v_add_u32_dpp {{v[0-9]+}}, vcc, {{v[0-9]+}}, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
define amdgpu_kernel void @dpp_combine_i32_boff_0(i32 addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %in, i32 1, i32 1, i32 1, i1 0) #0
  %res = add nsw i32 %dpp, %x
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

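; old is INT_MAX, materialized into the tied destination with v_bfrev_b32; max(INT_MAX, x) == INT_MAX, so keeping old on inactive lanes is safe.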
; CHECK-LABEL: {{^}}dpp_combine_i32_boff_max:
; CHECK: v_bfrev_b32_e32 [[OLD:v[0-9]+]], -2
; CHECK: v_max_i32_dpp [[OLD]], {{v[0-9]+}}, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
define amdgpu_kernel void @dpp_combine_i32_boff_max(i32 addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 2147483647, i32 %in, i32 1, i32 1, i32 1, i1 0) #0
  %cmp = icmp sge i32 %dpp, %x
  %res = select i1 %cmp, i32 %dpp, i32 %x
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

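; the symmetric min case: old is INT_MIN (v_bfrev_b32 of 1).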
; CHECK-LABEL: {{^}}dpp_combine_i32_boff_min:
; CHECK: v_bfrev_b32_e32 [[OLD:v[0-9]+]], 1
; CHECK: v_min_i32_dpp [[OLD]], {{v[0-9]+}}, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
define amdgpu_kernel void @dpp_combine_i32_boff_min(i32 addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 -2147483648, i32 %in, i32 1, i32 1, i32 1, i1 0) #0
  %cmp = icmp sle i32 %dpp, %x
  %res = select i1 %cmp, i32 %dpp, i32 %x
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

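; old is 1, the multiplicative identity, so the destination can presumably be tied to the other multiplicand (v0).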
; CHECK-LABEL: {{^}}dpp_combine_i32_boff_mul:
; CHECK: v_mul_i32_i24_dpp v0, v3, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
define amdgpu_kernel void @dpp_combine_i32_boff_mul(i32 addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 1, i32 %in, i32 1, i32 1, i32 1, i1 0) #0

  %dpp.shl = shl i32 %dpp, 8
  %dpp.24 = ashr i32 %dpp.shl, 8
  %x.shl = shl i32 %x, 8
  %x.24 = ashr i32 %x.shl, 8
  %res = mul i32 %dpp.24, %x.24
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

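; %x - %dpp places the DPP value in the second operand, so the sub is commuted into v_subrev.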
; CHECK-LABEL: {{^}}dpp_combine_i32_commute:
; CHECK: v_subrev_u32_dpp {{v[0-9]+}}, vcc, {{v[0-9]+}}, v0 quad_perm:[2,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
define amdgpu_kernel void @dpp_combine_i32_commute(i32 addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 undef, i32 %in, i32 2, i32 1, i32 1, i1 1) #0
  %res = sub nsw i32 %x, %dpp
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

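; the same combine applies to a floating-point VOP2 (v_add_f32).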
; CHECK-LABEL: {{^}}dpp_combine_f32:
; CHECK: v_add_f32_dpp {{v[0-9]+}}, {{v[0-9]+}}, v0 quad_perm:[3,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
define amdgpu_kernel void @dpp_combine_f32(i32 addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()

  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 undef, i32 %in, i32 3, i32 1, i32 1, i1 1) #0
  %dpp.f32 = bitcast i32 %dpp to float
  %x.f32 = bitcast i32 %x to float
  %res.f32 = fadd float %x.f32, %dpp.f32
  %res = bitcast float %res.f32 to i32
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

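; abs and neg source modifiers fold into the combined v_mul_f32_dpp.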
; CHECK-LABEL: {{^}}dpp_combine_test_f32_mods:
; CHECK: v_mul_f32_dpp {{v[0-9]+}}, |{{v[0-9]+}}|, -v0 quad_perm:[0,1,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
define amdgpu_kernel void @dpp_combine_test_f32_mods(i32 addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()

  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 undef, i32 %in, i32 4, i32 1, i32 1, i1 1) #0

  %x.f32 = bitcast i32 %x to float
  %x.f32.neg = fsub float -0.000000e+00, %x.f32

  %dpp.f32 = bitcast i32 %dpp to float
  %dpp.f32.cmp = fcmp fast olt float %dpp.f32, 0.000000e+00
  %dpp.f32.sign = select i1 %dpp.f32.cmp, float -1.000000e+00, float 1.000000e+00
  %dpp.f32.abs = fmul fast float %dpp.f32, %dpp.f32.sign

  %res.f32 = fmul float %x.f32.neg, %dpp.f32.abs
  %res = bitcast float %res.f32 to i32
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

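; the fmul/fadd pair is matched to v_mac_f32, which still accepts the DPP source.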
; CHECK-LABEL: {{^}}dpp_combine_mac:
; CHECK: v_mac_f32_dpp v0, {{v[0-9]+}}, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
define amdgpu_kernel void @dpp_combine_mac(float addrspace(1)* %out, i32 %in) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 undef, i32 %in, i32 1, i32 1, i32 1, i1 1) #0
  %dpp.f32 = bitcast i32 %dpp to float
  %x.f32 = bitcast i32 %x to float
  %y.f32 = bitcast i32 %y to float

  %mult = fmul float %dpp.f32, %y.f32
  %res = fadd float %mult, %x.f32
  store float %res, float addrspace(1)* %out
  ret void
}

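; a single DPP mov with two users in different blocks: both users get combined.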
; CHECK-LABEL: {{^}}dpp_combine_sequence:
define amdgpu_kernel void @dpp_combine_sequence(i32 addrspace(1)* %out, i32 %in, i1 %cmp) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 undef, i32 %in, i32 1, i32 1, i32 1, i1 1) #0
  br i1 %cmp, label %bb1, label %bb2
bb1:
; CHECK: v_add_u32_dpp {{v[0-9]+}}, vcc, {{v[0-9]+}}, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
  %resadd = add nsw i32 %dpp, %x
  br label %bb3
bb2:
; CHECK: v_subrev_u32_dpp {{v[0-9]+}}, vcc, {{v[0-9]+}}, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
  %ressub = sub nsw i32 %x, %dpp
  br label %bb3
bb3:
  %res = phi i32 [%resadd, %bb1], [%ressub, %bb2]
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

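; negative case: the sub takes a literal, so that use cannot be combined and the v_mov_b32_dpp must remain.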
; CHECK-LABEL: {{^}}dpp_combine_sequence_negative:
; CHECK: v_mov_b32_dpp v1, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
define amdgpu_kernel void @dpp_combine_sequence_negative(i32 addrspace(1)* %out, i32 %in, i1 %cmp) {
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %dpp = call i32 @llvm.amdgcn.update.dpp.i32(i32 undef, i32 %in, i32 1, i32 1, i32 1, i1 1) #0
  br i1 %cmp, label %bb1, label %bb2
bb1:
  %resadd = add nsw i32 %dpp, %x
  br label %bb3
bb2:
  %ressub = sub nsw i32 2, %dpp ; break seq
  br label %bb3
bb3:
  %res = phi i32 [%resadd, %bb1], [%ressub, %bb2]
  store i32 %res, i32 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.workitem.id.y()
declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32, i32, i32, i1) #0

attributes #0 = { nounwind readnone convergent }