; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -march=amdgcn -mcpu=tonga -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -check-prefix=GFX8 %s
; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s

define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in1, i32 %in2) {
; GFX8-LABEL: dpp_test:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
; GFX8-NEXT:    v_mov_b32_e32 v2, s2
; GFX8-NEXT:    v_mov_b32_e32 v0, s3
; GFX8-NEXT:    s_nop 1
; GFX8-NEXT:    v_mov_b32_dpp v2, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; GFX8-NEXT:    v_mov_b32_e32 v0, s0
; GFX8-NEXT:    v_mov_b32_e32 v1, s1
; GFX8-NEXT:    flat_store_dword v[0:1], v2
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: dpp_test:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_clause 0x1
; GFX10-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
; GFX10-NEXT:    v_mov_b32_e32 v0, s2
; GFX10-NEXT:    v_mov_b32_e32 v1, s3
; GFX10-NEXT:    v_mov_b32_dpp v0, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; GFX10-NEXT:    v_mov_b32_e32 v1, 0
; GFX10-NEXT:    global_store_dword v1, v0, s[4:5]
; GFX10-NEXT:    s_endpgm
  %tmp0 = call i32 @llvm.amdgcn.update.dpp.i32(i32 %in1, i32 %in2, i32 1, i32 1, i32 1, i1 false)
  store i32 %tmp0, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @update_dpp64_test(i64 addrspace(1)* %arg, i64 %in1, i64 %in2) {
; GFX8-LABEL: update_dpp64_test:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
; GFX8-NEXT:    v_mov_b32_e32 v0, s0
; GFX8-NEXT:    v_mov_b32_e32 v1, s1
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT:    v_mov_b32_e32 v5, s3
; GFX8-NEXT:    v_mov_b32_e32 v4, s2
; GFX8-NEXT:    s_waitcnt vmcnt(0)
; GFX8-NEXT:    v_mov_b32_dpp v5, v3 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; GFX8-NEXT:    v_mov_b32_dpp v4, v2 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: update_dpp64_test:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 3, v0
; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
; GFX10-NEXT:    global_load_dwordx2 v[0:1], v4, s[0:1]
; GFX10-NEXT:    v_mov_b32_e32 v2, s2
; GFX10-NEXT:    v_mov_b32_e32 v3, s3
; GFX10-NEXT:    s_waitcnt vmcnt(0)
; GFX10-NEXT:    v_mov_b32_dpp v2, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; GFX10-NEXT:    v_mov_b32_dpp v3, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; GFX10-NEXT:    global_store_dwordx2 v4, v[2:3], s[0:1]
; GFX10-NEXT:    s_endpgm
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %id
  %load = load i64, i64 addrspace(1)* %gep
  %tmp0 = call i64 @llvm.amdgcn.update.dpp.i64(i64 %in1, i64 %load, i32 1, i32 1, i32 1, i1 false) #1
  store i64 %tmp0, i64 addrspace(1)* %gep
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #0
declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32 immarg, i32 immarg, i32 immarg, i1 immarg) #1
declare i64 @llvm.amdgcn.update.dpp.i64(i64, i64, i32 immarg, i32 immarg, i32 immarg, i1 immarg) #1

attributes #0 = { nounwind readnone speculatable }
attributes #1 = { convergent nounwind readnone }