; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Test that when the same unknown vector index is extracted from an
; insertelement, the dynamic indexing is folded away.
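;
; Illustrative sketch only (comments, not a checked test case): with matching
; indices,
;   %ins = insertelement <4 x i32> %vec, i32 %val, i32 %idx
;   %elt = extractelement <4 x i32> %ins, i32 %idx
; simplifies to %val directly, so no per-element compare/select expansion of
; the dynamic index should be emitted.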

declare i32 @llvm.amdgcn.workitem.id.x() #0

; No dynamic indexing required
define amdgpu_kernel void @extract_insert_same_dynelt_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val, i32 %idx) #1 {
; GCN-LABEL: extract_insert_same_dynelt_v4i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT:    s_load_dword s4, s[4:5], 0xd
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    v_mov_b32_e32 v2, s4
; GCN-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; GCN-NEXT:    s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64
  %gep.in = getelementptr inbounds <4 x i32>, ptr addrspace(1) %in, i64 %id.ext
  %gep.out = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %id.ext
  %vec = load <4 x i32>, ptr addrspace(1) %gep.in
  %insert = insertelement <4 x i32> %vec, i32 %val, i32 %idx
  %extract = extractelement <4 x i32> %insert, i32 %idx
  store i32 %extract, ptr addrspace(1) %gep.out
  ret void
}

define amdgpu_kernel void @extract_insert_different_dynelt_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val, i32 %idx0, i32 %idx1) #1 {
; GCN-LABEL: extract_insert_different_dynelt_v4i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GCN-NEXT:    v_mov_b32_e32 v5, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[0:1], s[10:11]
; GCN-NEXT:    buffer_load_dwordx4 v[1:4], v[4:5], s[0:3], 0 addr64
; GCN-NEXT:    s_cmp_eq_u32 s13, 3
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s13, 2
; GCN-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s13, 1
; GCN-NEXT:    s_mov_b64 s[10:11], s[2:3]
; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s13, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
; GCN-NEXT:    v_mov_b32_e32 v0, s12
; GCN-NEXT:    s_cselect_b64 s[4:5], -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s14, 1
; GCN-NEXT:    v_mov_b32_e32 v7, v5
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v0, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v3, v3, v0, s[0:1]
; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v0, s[2:3]
; GCN-NEXT:    v_cndmask_b32_e64 v0, v1, v0, s[4:5]
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s14, 2
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s14, 3
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
; GCN-NEXT:    buffer_store_dword v0, v[6:7], s[8:11], 0 addr64
; GCN-NEXT:    s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64
  %gep.in = getelementptr inbounds <4 x i32>, ptr addrspace(1) %in, i64 %id.ext
  %gep.out = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %id.ext
  %vec = load <4 x i32>, ptr addrspace(1) %gep.in
  %insert = insertelement <4 x i32> %vec, i32 %val, i32 %idx0
  %extract = extractelement <4 x i32> %insert, i32 %idx1
  store i32 %extract, ptr addrspace(1) %gep.out
  ret void
}

define amdgpu_kernel void @extract_insert_same_elt2_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val, i32 %idx) #1 {
; GCN-LABEL: extract_insert_same_elt2_v4i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT:    s_load_dword s4, s[4:5], 0xd
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    v_mov_b32_e32 v2, s4
; GCN-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; GCN-NEXT:    s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64
  %gep.in = getelementptr inbounds <4 x i32>, ptr addrspace(1) %in, i64 %id.ext
  %gep.out = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %id.ext
  %vec = load <4 x i32>, ptr addrspace(1) %gep.in
  %insert = insertelement <4 x i32> %vec, i32 %val, i32 %idx
  %extract = extractelement <4 x i32> %insert, i32 %idx
  store i32 %extract, ptr addrspace(1) %gep.out
  ret void
}

define amdgpu_kernel void @extract_insert_same_dynelt_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %in, float %val, i32 %idx) #1 {
; GCN-LABEL: extract_insert_same_dynelt_v4f32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT:    s_load_dword s8, s[4:5], 0xd
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[4:5], s[2:3]
; GCN-NEXT:    v_mov_b32_e32 v5, 0
; GCN-NEXT:    buffer_load_dwordx4 v[1:4], v[4:5], s[4:7], 0 addr64 glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_mov_b64 s[2:3], s[6:7]
; GCN-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
; GCN-NEXT:    v_mov_b32_e32 v0, s8
; GCN-NEXT:    buffer_store_dword v0, v[4:5], s[0:3], 0 addr64
; GCN-NEXT:    s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64
  %gep.in = getelementptr inbounds <4 x float>, ptr addrspace(1) %in, i64 %id.ext
  %gep.out = getelementptr inbounds float, ptr addrspace(1) %out, i64 %id.ext
  %vec = load volatile <4 x float>, ptr addrspace(1) %gep.in
  %insert = insertelement <4 x float> %vec, float %val, i32 %idx
  %extract = extractelement <4 x float> %insert, i32 %idx
  store float %extract, ptr addrspace(1) %gep.out
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }