1 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,FUNC %s
2 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,FUNC %s
3 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=R600,FUNC %s
5 ; BFI_INT Definition pattern from ISA docs
; bfi_def: checks that the canonical bit-field-insert IR pattern on i32
; kernel arguments is selected to the hardware BFI instruction.
; NOTE(review): this excerpt is missing the IR body between the define and
; the store, plus the ret/closing brace — confirm against the complete test
; file before editing this function.
8 ; FUNC-LABEL: {{^}}bfi_def:
12 define amdgpu_kernel void @bfi_def(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
18 store i32 %3, i32 addrspace(1)* %out
; bfi_sha256_ch: per its name, tests the SHA-256 "Ch" (choose) bit pattern;
; presumably expected to select a bit-field-insert — TODO confirm, the check
; lines for this function are not visible in this excerpt.
; NOTE(review): the IR body, ret and closing brace are truncated here —
; verify against the full test file.
24 ; FUNC-LABEL: {{^}}bfi_sha256_ch:
28 define amdgpu_kernel void @bfi_sha256_ch(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
33 store i32 %2, i32 addrspace(1)* %out
; bfi_sha256_ma: SHA-256 "Ma" (majority) pattern on i32 scalar kernel args.
; The checks below expect it to lower to a single XOR feeding a bit-field
; insert on both R600 (XOR_INT + BFI_INT) and GCN (v_xor_b32 + v_bfi_b32).
; NOTE(review): the IR instructions between the define and the store, and
; the ret/closing brace, are missing from this excerpt — confirm against
; the complete file.
38 ; ((x & z) | (y & (x | z)))
39 ; FUNC-LABEL: {{^}}bfi_sha256_ma:
40 ; R600: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], KC0[2].Z, KC0[2].W
41 ; R600: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, KC0[3].X, KC0[2].W
43 ; GCN: v_xor_b32_e32 [[DST:v[0-9]+]], {{s[0-9]+, v[0-9]+}}
44 ; GCN: v_bfi_b32 {{v[0-9]+}}, [[DST]], {{s[0-9]+, v[0-9]+}}
45 define amdgpu_kernel void @bfi_sha256_ma(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
51 store i32 %3, i32 addrspace(1)* %out
; v_bitselect_v2i32_pat1: ((a ^ mask) & b) ^ mask on <2 x i32> VGPR
; arguments.  This is an alternate spelling of bitselect; the checks expect
; one v_bfi_b32 per 32-bit element and no separate xor/and instructions.
; NOTE(review): the function's closing brace is not visible in this excerpt
; — confirm against the complete file.
55 ; FUNC-LABEL: {{^}}v_bitselect_v2i32_pat1:
57 ; GCN-NEXT: v_bfi_b32 v0, v2, v0, v4
58 ; GCN-NEXT: v_bfi_b32 v1, v3, v1, v5
59 ; GCN-NEXT: s_setpc_b64
60 define <2 x i32> @v_bitselect_v2i32_pat1(<2 x i32> %a, <2 x i32> %b, <2 x i32> %mask) {
61 %xor.0 = xor <2 x i32> %a, %mask
62 %and = and <2 x i32> %xor.0, %b
63 %bitselect = xor <2 x i32> %and, %mask
64 ret <2 x i32> %bitselect
; v_bitselect_i64_pat_0: the classic bit-select form (a & b) | (~a & mask)
; on an i64, which the checks expect to split into two v_bfi_b32, one per
; 32-bit half of the value.
; NOTE(review): the 'ret' and closing brace are missing from this excerpt —
; confirm against the complete file.
67 ; FUNC-LABEL: {{^}}v_bitselect_i64_pat_0:
69 ; GCN-NEXT: v_bfi_b32 v1, v1, v3, v5
70 ; GCN-NEXT: v_bfi_b32 v0, v0, v2, v4
71 ; GCN-NEXT: s_setpc_b64
72 define i64 @v_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
73 %and0 = and i64 %a, %b
74 %not.a = xor i64 %a, -1
75 %and1 = and i64 %not.a, %mask
76 %bitselect = or i64 %and0, %and1
; v_bitselect_i64_pat_1: the xor/and/xor spelling ((a ^ mask) & b) ^ mask
; on an i64; expected to lower to two v_bfi_b32, one per 32-bit half.
; NOTE(review): the 'ret' and closing brace are missing from this excerpt —
; confirm against the complete file.
80 ; FUNC-LABEL: {{^}}v_bitselect_i64_pat_1:
82 ; GCN-NEXT: v_bfi_b32 v1, v3, v1, v5
83 ; GCN-NEXT: v_bfi_b32 v0, v2, v0, v4
84 ; GCN-NEXT: s_setpc_b64
85 define i64 @v_bitselect_i64_pat_1(i64 %a, i64 %b, i64 %mask) {
86 %xor.0 = xor i64 %a, %mask
87 %and = and i64 %xor.0, %b
88 %bitselect = xor i64 %and, %mask
; v_bitselect_i64_pat_2: same visible IR as pat_1 (xor/and/xor bitselect on
; i64), but the checks use order-independent -DAG matching for the two
; v_bfi_b32 instructions instead of a fixed order.
; NOTE(review): the 'ret' and closing brace are missing from this excerpt —
; confirm against the complete file.
92 ; FUNC-LABEL: {{^}}v_bitselect_i64_pat_2:
94 ; GCN-DAG: v_bfi_b32 v0, v2, v0, v4
95 ; GCN-DAG: v_bfi_b32 v1, v3, v1, v5
96 ; GCN-NEXT: s_setpc_b64
97 define i64 @v_bitselect_i64_pat_2(i64 %a, i64 %b, i64 %mask) {
98 %xor.0 = xor i64 %a, %mask
99 %and = and i64 %xor.0, %b
100 %bitselect = xor i64 %and, %mask
; v_bfi_sha256_ma_i64: the SHA-256 majority pattern widened to i64 VGPR
; inputs; expected to become a per-half v_xor_b32 feeding a v_bfi_b32.
; NOTE(review): the definition of %or0 (and the ret/closing brace) is
; missing from this excerpt — presumably %or0 = or/xor of %x and %z per
; the Ma expansion; confirm against the complete file.
104 ; FUNC-LABEL: {{^}}v_bfi_sha256_ma_i64:
105 ; GCN-DAG: v_xor_b32_e32 v1, v1, v3
106 ; GCN-DAG: v_xor_b32_e32 v0, v0, v2
107 ; GCN-DAG: v_bfi_b32 v1, v1, v5, v3
108 ; GCN-DAG: v_bfi_b32 v0, v0, v4, v2
109 define i64 @v_bfi_sha256_ma_i64(i64 %x, i64 %y, i64 %z) {
111 %and0 = and i64 %x, %z
113 %and1 = and i64 %y, %or0
114 %or1 = or i64 %and0, %and1
; s_bitselect_i64_pat_0: same (a & b) | (~a & mask) bitselect as the vector
; test, but with SGPR (scalar) kernel arguments.  Per the FIXME, codegen
; currently copies the scalars into VGPRs (the v_mov_b32 checks) instead of
; keeping the computation in 64-bit SALU ops.  The extra add-10 keeps a
; scalar use of the result alive.
; NOTE(review): the remaining check lines, the 'ret' and the closing brace
; are missing from this excerpt — confirm against the complete file.
118 ; FIXME: Should leave as 64-bit SALU ops
119 ; FUNC-LABEL: {{^}}s_bitselect_i64_pat_0:
120 ; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
121 ; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
122 ; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
123 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
126 define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
127 %and0 = and i64 %a, %b
128 %not.a = xor i64 %a, -1
129 %and1 = and i64 %not.a, %mask
130 %bitselect = or i64 %and0, %and1
131 %scalar.use = add i64 %bitselect, 10
132 store i64 %scalar.use, i64 addrspace(1)* undef
; s_bitselect_i64_pat_1: the xor/and/xor bitselect spelling with SGPR
; inputs; like pat_0, the scalars are currently moved into VGPRs (v_mov_b32
; checks) rather than staying in SALU form.  The add-10 keeps a scalar use
; of the result alive.
; NOTE(review): further check lines, the 'ret' and the closing brace are
; missing from this excerpt — confirm against the complete file.
136 ; FUNC-LABEL: {{^}}s_bitselect_i64_pat_1:
137 ; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
138 ; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
139 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
140 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
143 define amdgpu_kernel void @s_bitselect_i64_pat_1(i64 %a, i64 %b, i64 %mask) {
144 %xor.0 = xor i64 %a, %mask
145 %and = and i64 %xor.0, %b
146 %bitselect = xor i64 %and, %mask
148 %scalar.use = add i64 %bitselect, 10
149 store i64 %scalar.use, i64 addrspace(1)* undef
; s_bitselect_i64_pat_2: identical visible IR to pat_1 (xor/and/xor on SGPR
; i64 inputs); differs only in how the expected v_mov_b32 copies are checked.
; The add-10 keeps a scalar use of the result alive.
; NOTE(review): intermediate check lines, the 'ret' and the closing brace
; are missing from this excerpt — confirm against the complete file.
153 ; FUNC-LABEL: {{^}}s_bitselect_i64_pat_2:
154 ; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
155 ; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
157 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
158 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
160 define amdgpu_kernel void @s_bitselect_i64_pat_2(i64 %a, i64 %b, i64 %mask) {
161 %xor.0 = xor i64 %a, %mask
162 %and = and i64 %xor.0, %b
163 %bitselect = xor i64 %and, %mask
165 %scalar.use = add i64 %bitselect, 10
166 store i64 %scalar.use, i64 addrspace(1)* undef
; s_bfi_sha256_ma_i64: the SHA-256 majority pattern on SGPR i64 inputs;
; as with the other s_* tests, scalars are currently copied to VGPRs
; (v_mov_b32 checks).  The add-10 keeps a scalar use of the result alive.
; NOTE(review): the definition of %or0 is missing from this excerpt
; (presumably an or/xor of %x and %z per the Ma expansion), as are further
; check lines and the ret/closing brace — confirm against the complete file.
170 ; FUNC-LABEL: {{^}}s_bfi_sha256_ma_i64:
171 ; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
173 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
177 define amdgpu_kernel void @s_bfi_sha256_ma_i64(i64 %x, i64 %y, i64 %z) {
179 %and0 = and i64 %x, %z
181 %and1 = and i64 %y, %or0
182 %or1 = or i64 %and0, %and1
184 %scalar.use = add i64 %or1, 10
185 store i64 %scalar.use, i64 addrspace(1)* undef