; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=NOSDWA,GCN %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI,GFX89,SDWA,GCN %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -amdgpu-sdwa-peephole -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GFX9_10,SDWA,GCN %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx1010 -amdgpu-sdwa-peephole -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX10,GFX9_10,SDWA,GCN %s
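
; The first RUN line disables the SDWA peephole (-amdgpu-sdwa-peephole=0,
; NOSDWA prefix); the remaining RUN lines enable it on VI (fiji), GFX9 and
; GFX10, so the checks compare codegen with and without the pass.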

; GCN-LABEL: {{^}}add_shr_i32:
; NOSDWA: v_lshrrev_b32_e32 v[[DST:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_add_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v[[DST]]
; NOSDWA-NOT: v_add{{(_co)?}}_u32_sdwa

; VI: v_add_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9: v_add_u32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX10: v_add_nc_u32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
define amdgpu_kernel void @add_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %a = load i32, i32 addrspace(1)* %in, align 4
  %shr = lshr i32 %a, 16
  %add = add i32 %a, %shr
  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}sub_shr_i32:
; NOSDWA: v_lshrrev_b32_e32 v[[DST:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_subrev_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v[[DST]]
; NOSDWA-NOT: v_subrev{{(_co)?}}_u32_sdwa

; VI: v_subrev_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9: v_sub_u32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX10: v_sub_nc_u32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
define amdgpu_kernel void @sub_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %a = load i32, i32 addrspace(1)* %in, align 4
  %shr = lshr i32 %a, 16
  %sub = sub i32 %shr, %a
  store i32 %sub, i32 addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_shr_i32:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v[[DST0]], v[[DST1]]
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; SDWA: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
define amdgpu_kernel void @mul_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in1, i32 addrspace(1)* %in2) #0 {
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gep1 = getelementptr i32, i32 addrspace(1)* %in1, i32 %idx
  %gep2 = getelementptr i32, i32 addrspace(1)* %in2, i32 %idx
  %a = load i32, i32 addrspace(1)* %gep1, align 4
  %b = load i32, i32 addrspace(1)* %gep2, align 4
  %shra = lshr i32 %a, 16
  %shrb = lshr i32 %b, 16
  %mul = mul i32 %shra, %shrb
  store i32 %mul, i32 addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_i16:
; NOSDWA: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa
; GFX89: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX10: v_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; SDWA-NOT: v_mul_u32_u24_sdwa
define amdgpu_kernel void @mul_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %ina, i16 addrspace(1)* %inb) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gepa = getelementptr i16, i16 addrspace(1)* %ina, i32 %idx
  %gepb = getelementptr i16, i16 addrspace(1)* %inb, i32 %idx
  %a = load i16, i16 addrspace(1)* %gepa, align 4
  %b = load i16, i16 addrspace(1)* %gepb, align 4
  %mul = mul i16 %a, %b
  store i16 %mul, i16 addrspace(1)* %out, align 4
  ret void
}
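
; For <2 x i16>, GFX9 and GFX10 select the packed v_pk_mul_lo_u16, while VI
; multiplies the low halves with a plain v_mul_lo_u16 and the high halves
; through SDWA before recombining the result with v_or_b32.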

; GCN-LABEL: {{^}}mul_v2i16:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_mul_lo_u16_e32 v[[DST_MUL:[0-9]+]], v[[DST1]], v[[DST0]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MUL]]
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_lo_u16_e32 v[[DST_MUL_LO:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_mul_lo_u16_sdwa v[[DST_MUL_HI:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL_LO]], v[[DST_MUL_HI]]

; GFX9_10: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @mul_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %ina, <2 x i16> addrspace(1)* %inb) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gepa = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %ina, i32 %idx
  %gepb = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %inb, i32 %idx
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %gepa, align 4
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %gepb, align 4
  %mul = mul <2 x i16> %a, %b
  store <2 x i16> %mul, <2 x i16> addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_v4i16:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_lo_u16_e32 v[[DST_MUL0:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_mul_lo_u16_sdwa v[[DST_MUL1:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_lo_u16_e32 v[[DST_MUL2:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_mul_lo_u16_sdwa v[[DST_MUL3:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL2]], v[[DST_MUL3]]
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL0]], v[[DST_MUL1]]

; GFX9_10-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9_10-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @mul_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %ina, <4 x i16> addrspace(1)* %inb) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gepa = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %ina, i32 %idx
  %gepb = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %inb, i32 %idx
  %a = load <4 x i16>, <4 x i16> addrspace(1)* %gepa, align 4
  %b = load <4 x i16>, <4 x i16> addrspace(1)* %gepb, align 4
  %mul = mul <4 x i16> %a, %b
  store <4 x i16> %mul, <4 x i16> addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_v8i16:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_lo_u16_e32 v[[DST_MUL0:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_mul_lo_u16_sdwa v[[DST_MUL1:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_lo_u16_e32 v[[DST_MUL2:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_mul_lo_u16_sdwa v[[DST_MUL3:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_lo_u16_e32 v[[DST_MUL4:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_mul_lo_u16_sdwa v[[DST_MUL5:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_lo_u16_e32 v[[DST_MUL6:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_mul_lo_u16_sdwa v[[DST_MUL7:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL6]], v[[DST_MUL7]]
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL4]], v[[DST_MUL5]]
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL2]], v[[DST_MUL3]]
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL0]], v[[DST_MUL1]]

; GFX9_10-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9_10-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9_10-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9_10-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @mul_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(1)* %ina, <8 x i16> addrspace(1)* %inb) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gepa = getelementptr <8 x i16>, <8 x i16> addrspace(1)* %ina, i32 %idx
  %gepb = getelementptr <8 x i16>, <8 x i16> addrspace(1)* %inb, i32 %idx
  %a = load <8 x i16>, <8 x i16> addrspace(1)* %gepa, align 4
  %b = load <8 x i16>, <8 x i16> addrspace(1)* %gepb, align 4
  %mul = mul <8 x i16> %a, %b
  store <8 x i16> %mul, <8 x i16> addrspace(1)* %out, align 4
  ret void
}
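
; A scalar half multiply has no second packed element to fold, so no SDWA
; form is expected even with the peephole enabled.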

; GCN-LABEL: {{^}}mul_half:
; NOSDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_f16_sdwa
; SDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; SDWA-NOT: v_mul_f16_sdwa
define amdgpu_kernel void @mul_half(half addrspace(1)* %out, half addrspace(1)* %ina, half addrspace(1)* %inb) #0 {
entry:
  %a = load half, half addrspace(1)* %ina, align 4
  %b = load half, half addrspace(1)* %inb, align 4
  %mul = fmul half %a, %b
  store half %mul, half addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_v2half:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_mul_f16_e32 v[[DST_MUL:[0-9]+]], v[[DST0]], v[[DST1]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MUL]]
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mul_f16_sdwa

; VI-DAG: v_mul_f16_sdwa v[[DST_MUL_HI:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_e32 v[[DST_MUL_LO:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL_LO]], v[[DST_MUL_HI]]

; GFX9_10: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @mul_v2half(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %ina, <2 x half> addrspace(1)* %inb) #0 {
entry:
  %a = load <2 x half>, <2 x half> addrspace(1)* %ina, align 4
  %b = load <2 x half>, <2 x half> addrspace(1)* %inb, align 4
  %mul = fmul <2 x half> %a, %b
  store <2 x half> %mul, <2 x half> addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_v4half:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_f16_sdwa

; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; GFX9_10-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9_10-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @mul_v4half(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %ina, <4 x half> addrspace(1)* %inb) #0 {
entry:
  %a = load <4 x half>, <4 x half> addrspace(1)* %ina, align 4
  %b = load <4 x half>, <4 x half> addrspace(1)* %inb, align 4
  %mul = fmul <4 x half> %a, %b
  store <4 x half> %mul, <4 x half> addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_v8half:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_f16_sdwa

; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; GFX9_10-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9_10-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9_10-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9_10-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @mul_v8half(<8 x half> addrspace(1)* %out, <8 x half> addrspace(1)* %ina, <8 x half> addrspace(1)* %inb) #0 {
entry:
  %a = load <8 x half>, <8 x half> addrspace(1)* %ina, align 4
  %b = load <8 x half>, <8 x half> addrspace(1)* %inb, align 4
  %mul = fmul <8 x half> %a, %b
  store <8 x half> %mul, <8 x half> addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_i8:
; NOSDWA: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa
; GFX89: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX10: v_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; SDWA-NOT: v_mul_u32_u24_sdwa
define amdgpu_kernel void @mul_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %ina, i8 addrspace(1)* %inb) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gepa = getelementptr i8, i8 addrspace(1)* %ina, i32 %idx
  %gepb = getelementptr i8, i8 addrspace(1)* %inb, i32 %idx
  %a = load i8, i8 addrspace(1)* %gepa, align 4
  %b = load i8, i8 addrspace(1)* %gepb, align 4
  %mul = mul i8 %a, %b
  store i8 %mul, i8 addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_v2i8:
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI: v_mul_lo_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1

; GFX9-DAG: v_mul_lo_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
; GFX9-DAG: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16

; GFX9: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD

; GFX10: v_lshlrev_b16 v{{[0-9]+}}, 8, v
; GFX10: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
define amdgpu_kernel void @mul_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %ina, <2 x i8> addrspace(1)* %inb) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gepa = getelementptr <2 x i8>, <2 x i8> addrspace(1)* %ina, i32 %idx
  %gepb = getelementptr <2 x i8>, <2 x i8> addrspace(1)* %inb, i32 %idx
  %a = load <2 x i8>, <2 x i8> addrspace(1)* %gepa, align 4
  %b = load <2 x i8>, <2 x i8> addrspace(1)* %gepb, align 4
  %mul = mul <2 x i8> %a, %b
  store <2 x i8> %mul, <2 x i8> addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_v4i8:
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_lo_u16_sdwa
; VI-DAG: v_mul_lo_u16_sdwa
; VI-DAG: v_mul_lo_u16_sdwa

; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa

; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
define amdgpu_kernel void @mul_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %ina, <4 x i8> addrspace(1)* %inb) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gepa = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %ina, i32 %idx
  %gepb = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %inb, i32 %idx
  %a = load <4 x i8>, <4 x i8> addrspace(1)* %gepa, align 4
  %b = load <4 x i8>, <4 x i8> addrspace(1)* %gepb, align 4
  %mul = mul <4 x i8> %a, %b
  store <4 x i8> %mul, <4 x i8> addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}mul_v8i8:
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_lo_u16_sdwa
; VI-DAG: v_mul_lo_u16_sdwa
; VI-DAG: v_mul_lo_u16_sdwa
; VI-DAG: v_mul_lo_u16_sdwa
; VI-DAG: v_mul_lo_u16_sdwa
; VI-DAG: v_mul_lo_u16_sdwa

; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa

; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
; GFX10-DAG: v_mul_lo_u16
define amdgpu_kernel void @mul_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> addrspace(1)* %ina, <8 x i8> addrspace(1)* %inb) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gepa = getelementptr <8 x i8>, <8 x i8> addrspace(1)* %ina, i32 %idx
  %gepb = getelementptr <8 x i8>, <8 x i8> addrspace(1)* %inb, i32 %idx
  %a = load <8 x i8>, <8 x i8> addrspace(1)* %gepa, align 4
  %b = load <8 x i8>, <8 x i8> addrspace(1)* %gepb, align 4
  %mul = mul <8 x i8> %a, %b
  store <8 x i8> %mul, <8 x i8> addrspace(1)* %out, align 4
  ret void
}
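
; When the peephole is enabled, the high word of the <2 x i16> source is
; converted directly with v_cvt_f16_i16_sdwa src0_sel:WORD_1 instead of going
; through a separate shift.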

; GCN-LABEL: {{^}}sitofp_v2i16_to_v2f16:
; NOSDWA-DAG: v_cvt_f16_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA-DAG: v_cvt_f16_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_cvt_f16_i16_sdwa

; SDWA-DAG: v_cvt_f16_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}
; SDWA-DAG: v_cvt_f16_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}} dst_sel:{{(WORD_1|DWORD)?}} dst_unused:UNUSED_PAD src0_sel:WORD_1

; FIXME: Should be able to avoid the or.
define amdgpu_kernel void @sitofp_v2i16_to_v2f16(
    <2 x half> addrspace(1)* %r,
    <2 x i16> addrspace(1)* %a) #0 {
entry:
  %a.val = load <2 x i16>, <2 x i16> addrspace(1)* %a
  %r.val = sitofp <2 x i16> %a.val to <2 x half>
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}
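
; VI folds the multiply-add into v_mac_f16_sdwa; GFX9 and GFX10 instead emit a
; packed multiply followed by a packed add.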

; GCN-LABEL: {{^}}mac_v2half:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_mac_f16_e32 v[[DST_MAC:[0-9]+]], v[[DST0]], v[[DST1]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MAC]]
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mac_f16_sdwa

; VI: v_mac_f16_sdwa v[[DST_MAC:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MAC]]

; GFX9_10: v_pk_mul_f16 v[[DST_MUL:[0-9]+]], v{{[0-9]+}}, v[[SRC:[0-9]+]]
; GFX9_10: v_pk_add_f16 v{{[0-9]+}}, v[[DST_MUL]], v[[SRC]]
define amdgpu_kernel void @mac_v2half(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %ina, <2 x half> addrspace(1)* %inb) #0 {
entry:
  %a = load <2 x half>, <2 x half> addrspace(1)* %ina, align 4
  %b = load <2 x half>, <2 x half> addrspace(1)* %inb, align 4
  %mul = fmul <2 x half> %a, %b
  %mac = fadd <2 x half> %mul, %b
  store <2 x half> %mac, <2 x half> addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}immediate_mul_v2i16:
; NOSDWA-NOT: v_mul_u32_u24_sdwa
; VI-DAG: v_mov_b32_e32 v[[M321:[0-9]+]], 0x141
; VI-DAG: v_mul_lo_u16_e32 v{{[0-9]+}}, 0x7b, v{{[0-9]+}}
; VI-DAG: v_mul_lo_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v[[M321]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD

; GFX9: s_mov_b32 s[[IMM:[0-9]+]], 0x141007b
; GFX9: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, s[[IMM]]

; GFX10: v_pk_mul_lo_u16 v{{[0-9]+}}, 0x141007b, v{{[0-9]+}}
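
; The packed constant 0x141007b is <i16 123, i16 321>: 0x7b = 123 in the low
; half and 0x141 = 321 in the high half.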
define amdgpu_kernel void @immediate_mul_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %idx
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %gep, align 4
  %mul = mul <2 x i16> %a, <i16 123, i16 321>
  store <2 x i16> %mul, <2 x i16> addrspace(1)* %out, align 4
  ret void
}

; Double use of the same source operand - should not convert it.
; GCN-LABEL: {{^}}mulmul_v2i16:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI: v_mul_lo_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD

; GFX9_10: v_pk_mul_lo_u16 v[[DST1:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GFX9_10: v_pk_mul_lo_u16 v{{[0-9]+}}, v[[DST1]], v{{[0-9]+}}
define amdgpu_kernel void @mulmul_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %ina, <2 x i16> addrspace(1)* %inb) #0 {
entry:
  %idx = call i32 @llvm.amdgcn.workitem.id.x()
  %gepa = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %ina, i32 %idx
  %gepb = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %inb, i32 %idx
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %gepa, align 4
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %gepb, align 4
  %mul = mul <2 x i16> %a, %b
  %mul2 = mul <2 x i16> %mul, %b
  store <2 x i16> %mul2, <2 x i16> addrspace(1)* %out, align 4
  ret void
}
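
; The add and the store are kept in separate basic blocks; VI should still
; form v_add_u32_sdwa for the add.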

; GCN-LABEL: {{^}}add_bb_v2i16:
; NOSDWA-NOT: v_add{{(_co)?}}_u32_sdwa

; VI: v_add_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1

; GFX9_10: v_pk_add_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @add_bb_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %ina, <2 x i16> addrspace(1)* %inb) #0 {
entry:
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %ina, align 4
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %inb, align 4
  br label %add_label
add_label:
  %add = add <2 x i16> %a, %b
  br label %store_label
store_label:
  store <2 x i16> %add, <2 x i16> addrspace(1)* %out, align 4
  ret void
}

; Check that "pulling out" SDWA operands works correctly.
; GCN-LABEL: {{^}}pulled_out_test:
; NOSDWA-DAG: v_and_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA-DAG: v_and_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_and_b32_sdwa
; NOSDWA-NOT: v_or_b32_sdwa

; VI-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9_10-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX89-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}

; GFX10-DAG: v_lshrrev_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD

; VI-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9_10-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX89-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}

; GFX10-DAG: v_lshrrev_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD

; GFX89: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD

; GFX10: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX10: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX10: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
define amdgpu_kernel void @pulled_out_test(<8 x i8> addrspace(1)* %sourceA, <8 x i8> addrspace(1)* %destValues) #0 {
entry:
  %idxprom = ashr exact i64 15, 32
  %arrayidx = getelementptr inbounds <8 x i8>, <8 x i8> addrspace(1)* %sourceA, i64 %idxprom
  %tmp = load <8 x i8>, <8 x i8> addrspace(1)* %arrayidx, align 8

  %tmp1 = extractelement <8 x i8> %tmp, i32 0
  %tmp2 = extractelement <8 x i8> %tmp, i32 1
  %tmp3 = extractelement <8 x i8> %tmp, i32 2
  %tmp4 = extractelement <8 x i8> %tmp, i32 3
  %tmp5 = extractelement <8 x i8> %tmp, i32 4
  %tmp6 = extractelement <8 x i8> %tmp, i32 5
  %tmp7 = extractelement <8 x i8> %tmp, i32 6
  %tmp8 = extractelement <8 x i8> %tmp, i32 7

  %tmp9 = insertelement <2 x i8> undef, i8 %tmp1, i32 0
  %tmp10 = insertelement <2 x i8> %tmp9, i8 %tmp2, i32 1
  %tmp11 = insertelement <2 x i8> undef, i8 %tmp3, i32 0
  %tmp12 = insertelement <2 x i8> %tmp11, i8 %tmp4, i32 1
  %tmp13 = insertelement <2 x i8> undef, i8 %tmp5, i32 0
  %tmp14 = insertelement <2 x i8> %tmp13, i8 %tmp6, i32 1
  %tmp15 = insertelement <2 x i8> undef, i8 %tmp7, i32 0
  %tmp16 = insertelement <2 x i8> %tmp15, i8 %tmp8, i32 1

  %tmp17 = shufflevector <2 x i8> %tmp10, <2 x i8> %tmp12, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %tmp18 = shufflevector <2 x i8> %tmp14, <2 x i8> %tmp16, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %tmp19 = shufflevector <4 x i8> %tmp17, <4 x i8> %tmp18, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>

  %arrayidx5 = getelementptr inbounds <8 x i8>, <8 x i8> addrspace(1)* %destValues, i64 %idxprom
  store <8 x i8> %tmp19, <8 x i8> addrspace(1)* %arrayidx5, align 8
  ret void
}
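
; As the function name suggests, this guards against a crash when an SDWA
; candidate uses a value defined by inline asm.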

; GCN-LABEL: {{^}}sdwa_crash_inlineasm_def:
; GCN: s_mov_b32 s{{[0-9]+}}, 0xffff
; GCN: v_and_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}

; TODO: Why is the constant not peepholed into the v_or_b32_e32?

; NOSDWA: s_mov_b32 [[CONST:s[0-9]+]], 0x10000
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, [[CONST]],
; SDWA: v_or_b32_e32 v{{[0-9]+}}, 0x10000,
define amdgpu_kernel void @sdwa_crash_inlineasm_def() #0 {
bb:
  br label %bb1

bb1:                                              ; preds = %bb11, %bb
  %tmp = phi <2 x i32> [ %tmp12, %bb11 ], [ undef, %bb ]
  br i1 true, label %bb2, label %bb11

bb2:                                              ; preds = %bb1
  %tmp3 = call i32 asm "v_and_b32_e32 $0, $1, $2", "=v,s,v"(i32 65535, i32 undef) #1
  %tmp5 = or i32 %tmp3, 65536
  %tmp6 = insertelement <2 x i32> %tmp, i32 %tmp5, i64 0
  br label %bb11

bb11:                                             ; preds = %bb2, %bb1
  %tmp12 = phi <2 x i32> [ %tmp6, %bb2 ], [ %tmp, %bb1 ]
  store volatile <2 x i32> %tmp12, <2 x i32> addrspace(1)* undef
  br label %bb1
}

declare i32 @llvm.amdgcn.workitem.id.x()

attributes #0 = { "denormal-fp-math"="preserve-sign,preserve-sign" }