; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=GFX9 %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
; RUN: llc -mtriple=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s

; FIXME: Should be same on CI/VI
; GCN-LABEL: {{^}}s_ashr_v2i16:
; GFX9: s_load_dword [[LHS:s[0-9]+]]
; GFX9: s_load_dword [[RHS:s[0-9]+]]
; GFX9: v_mov_b32_e32 [[VLHS:v[0-9]+]], [[LHS]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[VLHS]]

; CIVI: s_load_dword [[LHS:s[0-9]+]]
; CIVI: s_load_dword [[RHS:s[0-9]+]]

; CIVI-DAG: s_ashr_i32
; CIVI-DAG: s_ashr_i32
; CIVI-DAG: s_sext_i32_i16
; CIVI-DAG: s_sext_i32_i16
; CIVI-DAG: s_ashr_i32
; CIVI-DAG: s_ashr_i32
; CIVI-DAG: s_lshl_b32
define amdgpu_kernel void @s_ashr_v2i16(ptr addrspace(1) %out, i32, <2 x i16> %lhs, i32, <2 x i16> %rhs) #0 {
  %result = ashr <2 x i16> %lhs, %rhs
  store <2 x i16> %result, ptr addrspace(1) %out
  ret void
}

; GCN-LABEL: {{^}}v_ashr_v2i16:
; GCN: {{buffer|flat|global}}_load_dwordx2 v[[[LHS:[0-9]+]]:[[RHS:[0-9]+]]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], v[[RHS]], v[[LHS]]

; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; CI-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, 16, v[[LHS]]
; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_and_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
; CI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_ashr_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %out, i64 %tid.ext
  %b_ptr = getelementptr <2 x i16>, ptr addrspace(1) %in.gep, i32 1
  %a = load <2 x i16>, ptr addrspace(1) %in.gep
  %b = load <2 x i16>, ptr addrspace(1) %b_ptr
  %result = ashr <2 x i16> %a, %b
  store <2 x i16> %result, ptr addrspace(1) %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_v_s_v2i16:
; GFX9: s_load_dword [[RHS:s[0-9]+]]
; GFX9: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
define amdgpu_kernel void @ashr_v_s_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in, <2 x i16> %sgpr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %out, i64 %tid.ext
  %vgpr = load <2 x i16>, ptr addrspace(1) %in.gep
  %result = ashr <2 x i16> %vgpr, %sgpr
  store <2 x i16> %result, ptr addrspace(1) %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_s_v_v2i16:
; GFX9: s_load_dword [[LHS:s[0-9]+]]
; GFX9: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
define amdgpu_kernel void @ashr_s_v_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in, <2 x i16> %sgpr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %out, i64 %tid.ext
  %vgpr = load <2 x i16>, ptr addrspace(1) %in.gep
  %result = ashr <2 x i16> %sgpr, %vgpr
  store <2 x i16> %result, ptr addrspace(1) %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_imm_v_v2i16:
; GCN: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], -4
define amdgpu_kernel void @ashr_imm_v_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %out, i64 %tid.ext
  %vgpr = load <2 x i16>, ptr addrspace(1) %in.gep
  %result = ashr <2 x i16> <i16 -4, i16 -4>, %vgpr
  store <2 x i16> %result, ptr addrspace(1) %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_v_imm_v2i16:
; GCN: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], 8, [[LHS]]
define amdgpu_kernel void @ashr_v_imm_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %out, i64 %tid.ext
  %vgpr = load <2 x i16>, ptr addrspace(1) %in.gep
  %result = ashr <2 x i16> %vgpr, <i16 8, i16 8>
  store <2 x i16> %result, ptr addrspace(1) %out.gep
  ret void
}

; GCN-LABEL: {{^}}v_ashr_v4i16:
; GCN: {{buffer|flat|global}}_load_dwordx4
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; GCN: {{buffer|flat|global}}_store_dwordx2
define amdgpu_kernel void @v_ashr_v4i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x i16>, ptr addrspace(1) %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <4 x i16>, ptr addrspace(1) %out, i64 %tid.ext
  %b_ptr = getelementptr <4 x i16>, ptr addrspace(1) %in.gep, i32 1
  %a = load <4 x i16>, ptr addrspace(1) %in.gep
  %b = load <4 x i16>, ptr addrspace(1) %b_ptr
  %result = ashr <4 x i16> %a, %b
  store <4 x i16> %result, ptr addrspace(1) %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_v_imm_v4i16:
; GCN: {{buffer|flat|global}}_load_dwordx2
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GCN: {{buffer|flat|global}}_store_dwordx2
define amdgpu_kernel void @ashr_v_imm_v4i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x i16>, ptr addrspace(1) %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <4 x i16>, ptr addrspace(1) %out, i64 %tid.ext
  %vgpr = load <4 x i16>, ptr addrspace(1) %in.gep
  %result = ashr <4 x i16> %vgpr, <i16 8, i16 8, i16 8, i16 8>
  store <4 x i16> %result, ptr addrspace(1) %out.gep
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }