; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64

; Test widening of loads and stores of vectors with non-power-of-two or
; sub-register widths (based on pr5626).
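
; A <3 x i32> load/add/store is widened to <4 x i32>: a full movdqa/paddd, with
; the low 8 bytes stored via movq and the third element via pextrd.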
%i32vec3 = type <3 x i32>
define void @add3i32(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; X86-LABEL: add3i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movdqa (%edx), %xmm0
; X86-NEXT:    paddd (%ecx), %xmm0
; X86-NEXT:    pextrd $2, %xmm0, 8(%eax)
; X86-NEXT:    movq %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: add3i32:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movdqa (%rsi), %xmm0
; X64-NEXT:    paddd (%rdx), %xmm0
; X64-NEXT:    pextrd $2, %xmm0, 8(%rdi)
; X64-NEXT:    movq %xmm0, (%rdi)
; X64-NEXT:    retq
  %a = load %i32vec3, %i32vec3* %ap, align 16
  %b = load %i32vec3, %i32vec3* %bp, align 16
  %x = add %i32vec3 %a, %b
  store %i32vec3 %x, %i32vec3* %ret, align 16
  ret void
}
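
; Same as add3i32, but with only 8-byte alignment: a full 16-byte movdqa load is
; not used; each operand is assembled from a movq plus a pinsrd of the third
; element.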
define void @add3i32_2(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; X86-LABEL: add3i32_2:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    pinsrd $2, 8(%edx), %xmm0
; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT:    pinsrd $2, 8(%ecx), %xmm1
; X86-NEXT:    paddd %xmm0, %xmm1
; X86-NEXT:    movq %xmm1, (%eax)
; X86-NEXT:    pextrd $2, %xmm1, 8(%eax)
; X86-NEXT:    retl
;
; X64-LABEL: add3i32_2:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    pinsrd $2, 8(%rsi), %xmm0
; X64-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT:    pinsrd $2, 8(%rdx), %xmm1
; X64-NEXT:    paddd %xmm0, %xmm1
; X64-NEXT:    pextrd $2, %xmm1, 8(%rdi)
; X64-NEXT:    movq %xmm1, (%rdi)
; X64-NEXT:    retq
  %a = load %i32vec3, %i32vec3* %ap, align 8
  %b = load %i32vec3, %i32vec3* %bp, align 8
  %x = add %i32vec3 %a, %b
  store %i32vec3 %x, %i32vec3* %ret, align 8
  ret void
}
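
; <7 x i32> splits into a full XMM chunk and a <3 x i32> tail; the tail is
; stored with movq plus a pextrd of the seventh element.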
%i32vec7 = type <7 x i32>
define void @add7i32(%i32vec7* sret %ret, %i32vec7* %ap, %i32vec7* %bp) {
; X86-LABEL: add7i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movdqa (%edx), %xmm0
; X86-NEXT:    movdqa 16(%edx), %xmm1
; X86-NEXT:    paddd (%ecx), %xmm0
; X86-NEXT:    paddd 16(%ecx), %xmm1
; X86-NEXT:    pextrd $2, %xmm1, 24(%eax)
; X86-NEXT:    movq %xmm1, 16(%eax)
; X86-NEXT:    movdqa %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: add7i32:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movdqa (%rsi), %xmm0
; X64-NEXT:    movdqa 16(%rsi), %xmm1
; X64-NEXT:    paddd (%rdx), %xmm0
; X64-NEXT:    paddd 16(%rdx), %xmm1
; X64-NEXT:    movq %xmm1, 16(%rdi)
; X64-NEXT:    pextrd $2, %xmm1, 24(%rdi)
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
  %a = load %i32vec7, %i32vec7* %ap, align 16
  %b = load %i32vec7, %i32vec7* %bp, align 16
  %x = add %i32vec7 %a, %b
  store %i32vec7 %x, %i32vec7* %ret, align 16
  ret void
}
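
; <12 x i32> is exactly three <4 x i32> chunks, so all loads, adds, and stores
; are full-width movdqa/paddd operations.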
%i32vec12 = type <12 x i32>
define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) {
; X86-LABEL: add12i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movdqa 32(%edx), %xmm0
; X86-NEXT:    movdqa (%edx), %xmm1
; X86-NEXT:    movdqa 16(%edx), %xmm2
; X86-NEXT:    paddd (%ecx), %xmm1
; X86-NEXT:    paddd 32(%ecx), %xmm0
; X86-NEXT:    paddd 16(%ecx), %xmm2
; X86-NEXT:    movdqa %xmm2, 16(%eax)
; X86-NEXT:    movdqa %xmm0, 32(%eax)
; X86-NEXT:    movdqa %xmm1, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: add12i32:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movdqa (%rsi), %xmm0
; X64-NEXT:    movdqa 16(%rsi), %xmm1
; X64-NEXT:    movdqa 32(%rsi), %xmm2
; X64-NEXT:    paddd (%rdx), %xmm0
; X64-NEXT:    paddd 32(%rdx), %xmm2
; X64-NEXT:    paddd 16(%rdx), %xmm1
; X64-NEXT:    movdqa %xmm1, 16(%rdi)
; X64-NEXT:    movdqa %xmm2, 32(%rdi)
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
  %a = load %i32vec12, %i32vec12* %ap, align 16
  %b = load %i32vec12, %i32vec12* %bp, align 16
  %x = add %i32vec12 %a, %b
  store %i32vec12 %x, %i32vec12* %ret, align 16
  ret void
}
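
; <3 x i16> is promoted to <4 x i32> with pmovzxwd; the sum is stored as a
; pextrw of the third word plus a pshufb/movd for the first two.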
%i16vec3 = type <3 x i16>
define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind {
; X86-LABEL: add3i16:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    movl 16(%ebp), %ecx
; X86-NEXT:    movl 12(%ebp), %edx
; X86-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X86-NEXT:    pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X86-NEXT:    paddd %xmm0, %xmm1
; X86-NEXT:    pextrw $4, %xmm1, 4(%eax)
; X86-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X86-NEXT:    movd %xmm1, (%eax)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: add3i16:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-NEXT:    pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-NEXT:    paddd %xmm0, %xmm1
; X64-NEXT:    pextrw $4, %xmm1, 4(%rdi)
; X64-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X64-NEXT:    movd %xmm1, (%rdi)
; X64-NEXT:    retq
  %a = load %i16vec3, %i16vec3* %ap, align 16
  %b = load %i16vec3, %i16vec3* %bp, align 16
  %x = add %i16vec3 %a, %b
  store %i16vec3 %x, %i16vec3* %ret, align 16
  ret void
}
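
; <4 x i16> fits in 64 bits, so both operands load and the paddw result stores
; with a single movq each.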
%i16vec4 = type <4 x i16>
define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind {
; X86-LABEL: add4i16:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT:    paddw %xmm0, %xmm1
; X86-NEXT:    movq %xmm1, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: add4i16:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT:    paddw %xmm0, %xmm1
; X64-NEXT:    movq %xmm1, (%rdi)
; X64-NEXT:    retq
  %a = load %i16vec4, %i16vec4* %ap, align 16
  %b = load %i16vec4, %i16vec4* %bp, align 16
  %x = add %i16vec4 %a, %b
  store %i16vec4 %x, %i16vec4* %ret, align 16
  ret void
}
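
; <12 x i16> widens across two XMM registers; only four words of the high half
; are live, so that chunk is stored with movq.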
%i16vec12 = type <12 x i16>
define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* %bp) nounwind {
; X86-LABEL: add12i16:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movdqa (%edx), %xmm0
; X86-NEXT:    movdqa 16(%edx), %xmm1
; X86-NEXT:    paddw (%ecx), %xmm0
; X86-NEXT:    paddw 16(%ecx), %xmm1
; X86-NEXT:    movq %xmm1, 16(%eax)
; X86-NEXT:    movdqa %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: add12i16:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movdqa (%rsi), %xmm0
; X64-NEXT:    movdqa 16(%rsi), %xmm1
; X64-NEXT:    paddw (%rdx), %xmm0
; X64-NEXT:    paddw 16(%rdx), %xmm1
; X64-NEXT:    movq %xmm1, 16(%rdi)
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
  %a = load %i16vec12, %i16vec12* %ap, align 16
  %b = load %i16vec12, %i16vec12* %bp, align 16
  %x = add %i16vec12 %a, %b
  store %i16vec12 %x, %i16vec12* %ret, align 16
  ret void
}
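
; <18 x i16> is 36 bytes: two full movdqa stores plus a movd for the final two
; words.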
%i16vec18 = type <18 x i16>
define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* %bp) nounwind {
; X86-LABEL: add18i16:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movdqa 32(%edx), %xmm0
; X86-NEXT:    movdqa (%edx), %xmm1
; X86-NEXT:    movdqa 16(%edx), %xmm2
; X86-NEXT:    paddw (%ecx), %xmm1
; X86-NEXT:    paddw 32(%ecx), %xmm0
; X86-NEXT:    paddw 16(%ecx), %xmm2
; X86-NEXT:    movdqa %xmm2, 16(%eax)
; X86-NEXT:    movd %xmm0, 32(%eax)
; X86-NEXT:    movdqa %xmm1, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: add18i16:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movdqa (%rsi), %xmm0
; X64-NEXT:    movdqa 16(%rsi), %xmm1
; X64-NEXT:    movdqa 32(%rsi), %xmm2
; X64-NEXT:    paddw (%rdx), %xmm0
; X64-NEXT:    paddw 32(%rdx), %xmm2
; X64-NEXT:    paddw 16(%rdx), %xmm1
; X64-NEXT:    movdqa %xmm1, 16(%rdi)
; X64-NEXT:    movd %xmm2, 32(%rdi)
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
  %a = load %i16vec18, %i16vec18* %ap, align 16
  %b = load %i16vec18, %i16vec18* %bp, align 16
  %x = add %i16vec18 %a, %b
  store %i16vec18 %x, %i16vec18* %ret, align 16
  ret void
}
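
; <3 x i8> is promoted to <4 x i32> with pmovzxbd; the sum is repacked by
; pshufb and stored as a pextrw (bytes 0-1) plus a pextrb (byte 2).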
%i8vec3 = type <3 x i8>
define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) nounwind {
; X86-LABEL: add3i8:
; X86:       # %bb.0:
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X86-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X86-NEXT:    paddd %xmm0, %xmm1
; X86-NEXT:    pextrb $8, %xmm1, 2(%eax)
; X86-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X86-NEXT:    pextrw $0, %xmm1, (%eax)
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: add3i8:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X64-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X64-NEXT:    paddd %xmm0, %xmm1
; X64-NEXT:    pextrb $8, %xmm1, 2(%rdi)
; X64-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X64-NEXT:    pextrw $0, %xmm1, (%rdi)
; X64-NEXT:    retq
  %a = load %i8vec3, %i8vec3* %ap, align 16
  %b = load %i8vec3, %i8vec3* %bp, align 16
  %x = add %i8vec3 %a, %b
  store %i8vec3 %x, %i8vec3* %ret, align 16
  ret void
}
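
; A 31-byte store decomposes into 16+8+4+2+1 bytes: movdqa, movq, pextrd,
; pextrw, and pextrb.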
%i8vec31 = type <31 x i8>
define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp) nounwind {
; X86-LABEL: add31i8:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movdqa (%edx), %xmm0
; X86-NEXT:    movdqa 16(%edx), %xmm1
; X86-NEXT:    paddb (%ecx), %xmm0
; X86-NEXT:    paddb 16(%ecx), %xmm1
; X86-NEXT:    pextrd $2, %xmm1, 24(%eax)
; X86-NEXT:    pextrw $6, %xmm1, 28(%eax)
; X86-NEXT:    pextrb $14, %xmm1, 30(%eax)
; X86-NEXT:    movq %xmm1, 16(%eax)
; X86-NEXT:    movdqa %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: add31i8:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movdqa (%rsi), %xmm0
; X64-NEXT:    movdqa 16(%rsi), %xmm1
; X64-NEXT:    paddb (%rdx), %xmm0
; X64-NEXT:    paddb 16(%rdx), %xmm1
; X64-NEXT:    movq %xmm1, 16(%rdi)
; X64-NEXT:    pextrd $2, %xmm1, 24(%rdi)
; X64-NEXT:    pextrw $6, %xmm1, 28(%rdi)
; X64-NEXT:    pextrb $14, %xmm1, 30(%rdi)
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
  %a = load %i8vec31, %i8vec31* %ap, align 16
  %b = load %i8vec31, %i8vec31* %bp, align 16
  %x = add %i8vec31 %a, %b
  store %i8vec31 %x, %i8vec31* %ret, align 16
  ret void
}
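
; The constant stores to %X and %rot fold to scalar mov immediates, and the
; <3 x i8> lshr by 1 becomes a psrld on the zero-extended values.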
%i8vec3pack = type { <3 x i8>, i8 }
define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pack* %rot) nounwind {
; X86-LABEL: rot:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movb $-98, 2(%edx)
; X86-NEXT:    movw $-24930, (%edx) # imm = 0x9E9E
; X86-NEXT:    movb $1, 2(%ecx)
; X86-NEXT:    movw $257, (%ecx) # imm = 0x101
; X86-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X86-NEXT:    psrld $1, %xmm0
; X86-NEXT:    pextrb $8, %xmm0, 2(%eax)
; X86-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X86-NEXT:    pextrw $0, %xmm0, (%eax)
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    retl
;
; X64-LABEL: rot:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movb $-98, 2(%rsi)
; X64-NEXT:    movw $-24930, (%rsi) # imm = 0x9E9E
; X64-NEXT:    movb $1, 2(%rdx)
; X64-NEXT:    movw $257, (%rdx) # imm = 0x101
; X64-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X64-NEXT:    psrld $1, %xmm0
; X64-NEXT:    pextrb $8, %xmm0, 2(%rdi)
; X64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X64-NEXT:    pextrw $0, %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %storetmp = bitcast %i8vec3pack* %X to <3 x i8>*
  store <3 x i8> <i8 -98, i8 -98, i8 -98>, <3 x i8>* %storetmp
  %storetmp1 = bitcast %i8vec3pack* %rot to <3 x i8>*
  store <3 x i8> <i8 1, i8 1, i8 1>, <3 x i8>* %storetmp1
  %tmp = load %i8vec3pack, %i8vec3pack* %X
  %extractVec = extractvalue %i8vec3pack %tmp, 0
  %tmp2 = load %i8vec3pack, %i8vec3pack* %rot
  %extractVec3 = extractvalue %i8vec3pack %tmp2, 0
  %shr = lshr <3 x i8> %extractVec, %extractVec3
  %storetmp4 = bitcast %i8vec3pack* %result to <3 x i8>*
  store <3 x i8> %shr, <3 x i8>* %storetmp4
  ret void
}