1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X64
3 ; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X32
4 ; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X32AVX
6 ; Use movq or movsd to load / store i64 values if sse2 is available.
; foo: copy one i64 through memory (%y -> %x). The autogenerated CHECK lines
; pin the expected codegen: a single 64-bit movq on x86-64, and — with SSE2
; available on i386 — a movsd load/store pair instead of two 32-bit integer
; moves (the two movl's below only load the pointer arguments off the stack).
; NOTE(review): CHECK lines are generated by update_llc_test_checks.py; edit
; the IR and regenerate rather than hand-editing them. This view of the file
; is missing interior lines (e.g. the LABEL lines and `ret void`/`}`).
9 define void @foo(i64* %x, i64* %y) {
12 ; X64-NEXT: movq (%rsi), %rax
13 ; X64-NEXT: movq %rax, (%rdi)
18 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
19 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
20 ; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
21 ; X32-NEXT: movsd %xmm0, (%eax)
26 ; X32AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
27 ; X32AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
28 ; X32AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
29 ; X32AVX-NEXT: vmovsd %xmm0, (%eax)
; The IR under test: a plain 8-byte-aligned i64 load followed by a store.
31 %tmp1 = load i64, i64* %y, align 8
32 store i64 %tmp1, i64* %x, align 8
36 ; Verify that a 64-bit chunk extracted from a vector is stored with a movq
37 ; regardless of whether the system is 64-bit.
; The `add` keeps the value in the integer execution domain (hence paddw /
; vpaddw in the checks); element 0 of the <2 x i64> bitcast is then stored
; directly from the XMM register with (v)movq — no GPR round-trip expected.
39 define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, i64* %i) {
40 ; X64-LABEL: store_i64_from_vector:
42 ; X64-NEXT: paddw %xmm1, %xmm0
43 ; X64-NEXT: movq %xmm0, (%rdi)
46 ; X32-LABEL: store_i64_from_vector:
48 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
49 ; X32-NEXT: paddw %xmm1, %xmm0
50 ; X32-NEXT: movq %xmm0, (%eax)
53 ; X32AVX-LABEL: store_i64_from_vector:
55 ; X32AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
56 ; X32AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
57 ; X32AVX-NEXT: vmovq %xmm0, (%eax)
59 %z = add <8 x i16> %x, %y ; force execution domain
60 %bc = bitcast <8 x i16> %z to <2 x i64>
61 %vecext = extractelement <2 x i64> %bc, i32 0
62 store i64 %vecext, i64* %i, align 8
; 256-bit variant: element 2 of the <4 x i64> bitcast lives in the upper
; 128-bit half, so X64/X32 (SSE2, where the <16 x i16> args are split across
; two XMM registers) operate on xmm1/xmm3, while X32AVX must vextracti128 the
; high halves before adding, and emits vzeroupper before returning (ABI
; expectation after using YMM state). The i386 SSE2 path also sets up an
; aligned frame (andl $-16) because the vector argument is passed on the stack.
66 define void @store_i64_from_vector256(<16 x i16> %x, <16 x i16> %y, i64* %i) {
67 ; X64-LABEL: store_i64_from_vector256:
69 ; X64-NEXT: paddw %xmm3, %xmm1
70 ; X64-NEXT: movq %xmm1, (%rdi)
73 ; X32-LABEL: store_i64_from_vector256:
75 ; X32-NEXT: pushl %ebp
76 ; X32-NEXT: .cfi_def_cfa_offset 8
77 ; X32-NEXT: .cfi_offset %ebp, -8
78 ; X32-NEXT: movl %esp, %ebp
79 ; X32-NEXT: .cfi_def_cfa_register %ebp
80 ; X32-NEXT: andl $-16, %esp
81 ; X32-NEXT: subl $16, %esp
82 ; X32-NEXT: movl 24(%ebp), %eax
83 ; X32-NEXT: paddw 8(%ebp), %xmm1
84 ; X32-NEXT: movq %xmm1, (%eax)
85 ; X32-NEXT: movl %ebp, %esp
87 ; X32-NEXT: .cfi_def_cfa %esp, 4
90 ; X32AVX-LABEL: store_i64_from_vector256:
92 ; X32AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
93 ; X32AVX-NEXT: vextracti128 $1, %ymm1, %xmm1
94 ; X32AVX-NEXT: vextracti128 $1, %ymm0, %xmm0
95 ; X32AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
96 ; X32AVX-NEXT: vmovq %xmm0, (%eax)
97 ; X32AVX-NEXT: vzeroupper
99 %z = add <16 x i16> %x, %y ; force execution domain
100 %bc = bitcast <16 x i16> %z to <4 x i64>
101 %vecext = extractelement <4 x i64> %bc, i32 2
102 store i64 %vecext, i64* %i, align 8
107 ; Handle extraction from a non-simple / pre-legalization type.
; PR23476: variable-index extractelement from <5 x i64> (not a legal vector
; type). Expected lowering: spill the widened vector to a 64-byte-aligned
; stack slot (andq/andl $-64), mask the runtime index with `andl $7` (the
; type is widened to 8 elements), then load the selected i64 from the slot —
; movq on x86-64, a movsd/vmovsd pair on i386 — and store it to %out.
; X32AVX again ends with vzeroupper after touching YMM registers.
109 define void @PR23476(<5 x i64> %in, i64* %out, i32 %index) nounwind {
110 ; X64-LABEL: PR23476:
112 ; X64-NEXT: pushq %rbp
113 ; X64-NEXT: movq %rsp, %rbp
114 ; X64-NEXT: andq $-64, %rsp
115 ; X64-NEXT: subq $128, %rsp
116 ; X64-NEXT: movq %rsi, %xmm0
117 ; X64-NEXT: movq %rdi, %xmm1
118 ; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
119 ; X64-NEXT: movq %rcx, %xmm0
120 ; X64-NEXT: movq %rdx, %xmm2
121 ; X64-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
122 ; X64-NEXT: movl 16(%rbp), %eax
123 ; X64-NEXT: andl $7, %eax
124 ; X64-NEXT: movq %r8, %xmm0
125 ; X64-NEXT: movdqa %xmm0, {{[0-9]+}}(%rsp)
126 ; X64-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp)
127 ; X64-NEXT: movdqa %xmm1, (%rsp)
128 ; X64-NEXT: movq (%rsp,%rax,8), %rax
129 ; X64-NEXT: movq %rax, (%r9)
130 ; X64-NEXT: movq %rbp, %rsp
131 ; X64-NEXT: popq %rbp
134 ; X32-LABEL: PR23476:
136 ; X32-NEXT: pushl %ebp
137 ; X32-NEXT: movl %esp, %ebp
138 ; X32-NEXT: andl $-64, %esp
139 ; X32-NEXT: subl $128, %esp
140 ; X32-NEXT: movl 52(%ebp), %eax
141 ; X32-NEXT: andl $7, %eax
142 ; X32-NEXT: movl 48(%ebp), %ecx
143 ; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
144 ; X32-NEXT: movups 8(%ebp), %xmm1
145 ; X32-NEXT: movups 24(%ebp), %xmm2
146 ; X32-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
147 ; X32-NEXT: movaps %xmm1, (%esp)
148 ; X32-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
149 ; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
150 ; X32-NEXT: movsd %xmm0, (%ecx)
151 ; X32-NEXT: movl %ebp, %esp
152 ; X32-NEXT: popl %ebp
155 ; X32AVX-LABEL: PR23476:
157 ; X32AVX-NEXT: pushl %ebp
158 ; X32AVX-NEXT: movl %esp, %ebp
159 ; X32AVX-NEXT: andl $-64, %esp
160 ; X32AVX-NEXT: subl $128, %esp
161 ; X32AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
162 ; X32AVX-NEXT: movl 52(%ebp), %eax
163 ; X32AVX-NEXT: andl $7, %eax
164 ; X32AVX-NEXT: movl 48(%ebp), %ecx
165 ; X32AVX-NEXT: vmovups 8(%ebp), %ymm1
166 ; X32AVX-NEXT: vmovaps %ymm1, (%esp)
167 ; X32AVX-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
168 ; X32AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
169 ; X32AVX-NEXT: vmovsd %xmm0, (%ecx)
170 ; X32AVX-NEXT: movl %ebp, %esp
171 ; X32AVX-NEXT: popl %ebp
172 ; X32AVX-NEXT: vzeroupper
174 %ext = extractelement <5 x i64> %in, i32 %index
175 store i64 %ext, i64* %out, align 8