; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,DPP64,GFX90A
; RUN: llc -march=amdgcn -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,DPP64,DPPMOV64
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,DPP32,GFX10PLUS
; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,DPP32,GFX10PLUS
; A 64-bit update.dpp (dpp_ctrl 337 = 0x151, i.e. row_newbcast:1) feeding an
; f64 ceil should be combined into a single v_ceil_f64_dpp on subtargets with
; 64-bit DPP support; on gfx10+ the 64-bit DPP is legalized as two 32-bit
; v_mov_b32_dpp row_share moves instead.
; GCN-LABEL: {{^}}dpp64_ceil:
; GCN: global_load_{{dwordx2|b64}} [[V:v\[[0-9:]+\]]],
; DPP64: v_ceil_f64_dpp [[V]], [[V]] row_newbcast:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; DPP32-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_share:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
define amdgpu_kernel void @dpp64_ceil(ptr addrspace(1) %arg, i64 %in1) {
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %id
  %load = load i64, ptr addrspace(1) %gep
  %tmp0 = call i64 @llvm.amdgcn.update.dpp.i64(i64 %in1, i64 %load, i32 337, i32 15, i32 15, i1 1) #0
  %tmp1 = bitcast i64 %tmp0 to double
  %round = tail call double @llvm.ceil.f64(double %tmp1)
  %tmp2 = bitcast double %round to i64
  store i64 %tmp2, ptr addrspace(1) %gep
  ret void
}
; Same combine as above but with the rcp.f64 intrinsic as the DPP consumer:
; a single v_rcp_f64_dpp on 64-bit-DPP subtargets, two 32-bit row_share moves
; on gfx10+.
; GCN-LABEL: {{^}}dpp64_rcp:
; GCN: global_load_{{dwordx2|b64}} [[V:v\[[0-9:]+\]]],
; DPP64: v_rcp_f64_dpp [[V]], [[V]] row_newbcast:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; DPP32-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_share:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
define amdgpu_kernel void @dpp64_rcp(ptr addrspace(1) %arg, i64 %in1) {
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %id
  %load = load i64, ptr addrspace(1) %gep
  %tmp0 = call i64 @llvm.amdgcn.update.dpp.i64(i64 %in1, i64 %load, i32 337, i32 15, i32 15, i1 1) #0
  %tmp1 = bitcast i64 %tmp0 to double
  %rcp = call double @llvm.amdgcn.rcp.f64(double %tmp1)
  %tmp2 = bitcast double %rcp to i64
  store i64 %tmp2, ptr addrspace(1) %gep
  ret void
}
; dpp_ctrl 1 (quad_perm:[1,0,0,0]) has no 64-bit DPP encoding, so the combine
; must NOT fire: the move stays split as two 32-bit v_mov_b32_dpp instructions
; and the rcp is emitted separately.
; GCN-LABEL: {{^}}dpp64_rcp_unsupported_ctl:
; GCN-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; GCN: v_rcp_f64_e32
define amdgpu_kernel void @dpp64_rcp_unsupported_ctl(ptr addrspace(1) %arg, i64 %in1) {
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %id
  %load = load i64, ptr addrspace(1) %gep
  %tmp0 = call i64 @llvm.amdgcn.update.dpp.i64(i64 %in1, i64 %load, i32 1, i32 15, i32 15, i1 1) #0
  %tmp1 = bitcast i64 %tmp0 to double
  %rcp = fdiv fast double 1.0, %tmp1
  %tmp2 = bitcast double %rcp to i64
  store i64 %tmp2, ptr addrspace(1) %gep
  ret void
}
; A full (non-fast) f64 divide expands to the div_scale/div_fmas sequence, so
; the DPP cannot fold into the consumer; the 64-bit DPP move itself is still
; emitted: as v_mov_b64_dpp on gfx940, and as two v_mov_b32_dpp instructions
; (row_newbcast on gfx90a, row_share on gfx10+) elsewhere.
; GCN-LABEL: {{^}}dpp64_div:
; GCN: global_load_{{dwordx2|b64}} [[V:v\[[0-9:]+\]]],
; DPPMOV64: v_mov_b64_dpp v[{{[0-9:]+}}], [[V]] row_newbcast:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; GFX90A-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_newbcast:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; GFX10PLUS-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_share:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; GCN: v_div_scale_f64
; GCN: v_rcp_f64_e32
define amdgpu_kernel void @dpp64_div(ptr addrspace(1) %arg, i64 %in1) {
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %id
  %load = load i64, ptr addrspace(1) %gep
  %tmp0 = call i64 @llvm.amdgcn.update.dpp.i64(i64 %in1, i64 %load, i32 337, i32 15, i32 15, i1 1) #0
  %tmp1 = bitcast i64 %tmp0 to double
  %rcp = fdiv double 15.0, %tmp1
  %tmp2 = bitcast double %rcp to i64
  store i64 %tmp2, ptr addrspace(1) %gep
  ret void
}
declare i32 @llvm.amdgcn.workitem.id.x()
declare i64 @llvm.amdgcn.update.dpp.i64(i64, i64, i32, i32, i32, i1) #0
declare double @llvm.ceil.f64(double)
declare double @llvm.amdgcn.rcp.f64(double)

; convergent is required on update.dpp: it exchanges data between lanes and
; must not be sunk/hoisted across control flow that changes the active mask.
attributes #0 = { nounwind readnone convergent }