1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefixes=CHECK,SSE,SSE4A
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX
7 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
8 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
10 ; Test codegen for under-aligned nontemporal vector stores
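;
; Hand-written sketch of what these tests exercise (not autogenerated): with
; only 1-byte alignment the 16-byte-aligned MOVNTPS/MOVNTDQ forms cannot be
; used, so a constant nontemporal vector store is expected to split into
; scalar non-temporal stores. For example, the v2f64 case below,
;
;   store <2 x double> <double 1.0, double 2.0>, ptr %dst, align 1, !nontemporal !1
;
; should lower to two 64-bit MOVNTI stores of the raw bit patterns
; (0x3FF0000000000000 = 1.0, 0x4000000000000000 = 2.0); SSE4A targets can
; instead use the unaligned scalar MOVNTSD/MOVNTSS stores, as the checks
; below show.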
14 define void @test_constant_v2f64_align1(ptr %dst) nounwind {
15 ; CHECK-LABEL: test_constant_v2f64_align1:
17 ; CHECK-NEXT: movabsq $4611686018427387904, %rax # imm = 0x4000000000000000
18 ; CHECK-NEXT: movntiq %rax, 8(%rdi)
19 ; CHECK-NEXT: movabsq $4607182418800017408, %rax # imm = 0x3FF0000000000000
20 ; CHECK-NEXT: movntiq %rax, (%rdi)
22 store <2 x double> <double 1.0, double 2.0>, ptr %dst, align 1, !nontemporal !1
26 define void @test_constant_v4f32_align1(ptr %dst) nounwind {
27 ; SSE2-LABEL: test_constant_v4f32_align1:
29 ; SSE2-NEXT: movabsq $4647714816524288000, %rax # imm = 0x4080000040400000
30 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
31 ; SSE2-NEXT: movabsq $4611686019492741120, %rax # imm = 0x400000003F800000
32 ; SSE2-NEXT: movntiq %rax, (%rdi)
35 ; SSE4A-LABEL: test_constant_v4f32_align1:
37 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
38 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
39 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [2.0000004731118679E+0,0.0E+0]
40 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
43 ; SSE41-LABEL: test_constant_v4f32_align1:
45 ; SSE41-NEXT: movabsq $4647714816524288000, %rax # imm = 0x4080000040400000
46 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
47 ; SSE41-NEXT: movabsq $4611686019492741120, %rax # imm = 0x400000003F800000
48 ; SSE41-NEXT: movntiq %rax, (%rdi)
51 ; AVX-LABEL: test_constant_v4f32_align1:
53 ; AVX-NEXT: movabsq $4647714816524288000, %rax # imm = 0x4080000040400000
54 ; AVX-NEXT: movntiq %rax, 8(%rdi)
55 ; AVX-NEXT: movabsq $4611686019492741120, %rax # imm = 0x400000003F800000
56 ; AVX-NEXT: movntiq %rax, (%rdi)
59 ; AVX512-LABEL: test_constant_v4f32_align1:
61 ; AVX512-NEXT: movabsq $4647714816524288000, %rax # imm = 0x4080000040400000
62 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
63 ; AVX512-NEXT: movabsq $4611686019492741120, %rax # imm = 0x400000003F800000
64 ; AVX512-NEXT: movntiq %rax, (%rdi)
66 store <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, ptr %dst, align 1, !nontemporal !1
70 define void @test_constant_v2i64_align1(ptr %dst) nounwind {
71 ; SSE2-LABEL: test_constant_v2i64_align1:
73 ; SSE2-NEXT: movl $1, %eax
74 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
75 ; SSE2-NEXT: xorl %eax, %eax
76 ; SSE2-NEXT: movntiq %rax, (%rdi)
79 ; SSE4A-LABEL: test_constant_v2i64_align1:
81 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [4.9406564584124654E-324,0.0E+0]
82 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
83 ; SSE4A-NEXT: xorl %eax, %eax
84 ; SSE4A-NEXT: movntiq %rax, (%rdi)
87 ; SSE41-LABEL: test_constant_v2i64_align1:
89 ; SSE41-NEXT: movl $1, %eax
90 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
91 ; SSE41-NEXT: xorl %eax, %eax
92 ; SSE41-NEXT: movntiq %rax, (%rdi)
95 ; AVX-LABEL: test_constant_v2i64_align1:
97 ; AVX-NEXT: movl $1, %eax
98 ; AVX-NEXT: movntiq %rax, 8(%rdi)
99 ; AVX-NEXT: xorl %eax, %eax
100 ; AVX-NEXT: movntiq %rax, (%rdi)
103 ; AVX512-LABEL: test_constant_v2i64_align1:
105 ; AVX512-NEXT: movl $1, %eax
106 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
107 ; AVX512-NEXT: xorl %eax, %eax
108 ; AVX512-NEXT: movntiq %rax, (%rdi)
110 store <2 x i64> <i64 0, i64 1>, ptr %dst, align 1, !nontemporal !1
114 define void @test_constant_v4i32_align1(ptr %dst) nounwind {
115 ; SSE2-LABEL: test_constant_v4i32_align1:
117 ; SSE2-NEXT: movabsq $12884901890, %rax # imm = 0x300000002
118 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
119 ; SSE2-NEXT: movabsq $4294967296, %rax # imm = 0x100000000
120 ; SSE2-NEXT: movntiq %rax, (%rdi)
123 ; SSE4A-LABEL: test_constant_v4i32_align1:
125 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
126 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
127 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [2.1219957909652723E-314,0.0E+0]
128 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
131 ; SSE41-LABEL: test_constant_v4i32_align1:
133 ; SSE41-NEXT: movabsq $12884901890, %rax # imm = 0x300000002
134 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
135 ; SSE41-NEXT: movabsq $4294967296, %rax # imm = 0x100000000
136 ; SSE41-NEXT: movntiq %rax, (%rdi)
139 ; AVX-LABEL: test_constant_v4i32_align1:
141 ; AVX-NEXT: movabsq $12884901890, %rax # imm = 0x300000002
142 ; AVX-NEXT: movntiq %rax, 8(%rdi)
143 ; AVX-NEXT: movabsq $4294967296, %rax # imm = 0x100000000
144 ; AVX-NEXT: movntiq %rax, (%rdi)
147 ; AVX512-LABEL: test_constant_v4i32_align1:
149 ; AVX512-NEXT: movabsq $12884901890, %rax # imm = 0x300000002
150 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
151 ; AVX512-NEXT: movabsq $4294967296, %rax # imm = 0x100000000
152 ; AVX512-NEXT: movntiq %rax, (%rdi)
154 store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr %dst, align 1, !nontemporal !1
158 define void @test_constant_v8i16_align1(ptr %dst) nounwind {
159 ; SSE2-LABEL: test_constant_v8i16_align1:
161 ; SSE2-NEXT: movabsq $1970350607106052, %rax # imm = 0x7000600050004
162 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
163 ; SSE2-NEXT: movabsq $844433520132096, %rax # imm = 0x3000200010000
164 ; SSE2-NEXT: movntiq %rax, (%rdi)
167 ; SSE4A-LABEL: test_constant_v8i16_align1:
169 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
170 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
171 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [4.1720559249406128E-309,0.0E+0]
172 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
175 ; SSE41-LABEL: test_constant_v8i16_align1:
177 ; SSE41-NEXT: movabsq $1970350607106052, %rax # imm = 0x7000600050004
178 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
179 ; SSE41-NEXT: movabsq $844433520132096, %rax # imm = 0x3000200010000
180 ; SSE41-NEXT: movntiq %rax, (%rdi)
183 ; AVX-LABEL: test_constant_v8i16_align1:
185 ; AVX-NEXT: movabsq $1970350607106052, %rax # imm = 0x7000600050004
186 ; AVX-NEXT: movntiq %rax, 8(%rdi)
187 ; AVX-NEXT: movabsq $844433520132096, %rax # imm = 0x3000200010000
188 ; AVX-NEXT: movntiq %rax, (%rdi)
191 ; AVX512-LABEL: test_constant_v8i16_align1:
193 ; AVX512-NEXT: movabsq $1970350607106052, %rax # imm = 0x7000600050004
194 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
195 ; AVX512-NEXT: movabsq $844433520132096, %rax # imm = 0x3000200010000
196 ; AVX512-NEXT: movntiq %rax, (%rdi)
198 store <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, ptr %dst, align 1, !nontemporal !1
202 define void @test_constant_v16i8_align1(ptr %dst) nounwind {
203 ; SSE2-LABEL: test_constant_v16i8_align1:
205 ; SSE2-NEXT: movabsq $1084818905618843912, %rax # imm = 0xF0E0D0C0B0A0908
206 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
207 ; SSE2-NEXT: movabsq $506097522914230528, %rax # imm = 0x706050403020100
208 ; SSE2-NEXT: movntiq %rax, (%rdi)
211 ; SSE4A-LABEL: test_constant_v16i8_align1:
213 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
214 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
215 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [7.9499288951273625E-275,0.0E+0]
216 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
219 ; SSE41-LABEL: test_constant_v16i8_align1:
221 ; SSE41-NEXT: movabsq $1084818905618843912, %rax # imm = 0xF0E0D0C0B0A0908
222 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
223 ; SSE41-NEXT: movabsq $506097522914230528, %rax # imm = 0x706050403020100
224 ; SSE41-NEXT: movntiq %rax, (%rdi)
227 ; AVX-LABEL: test_constant_v16i8_align1:
229 ; AVX-NEXT: movabsq $1084818905618843912, %rax # imm = 0xF0E0D0C0B0A0908
230 ; AVX-NEXT: movntiq %rax, 8(%rdi)
231 ; AVX-NEXT: movabsq $506097522914230528, %rax # imm = 0x706050403020100
232 ; AVX-NEXT: movntiq %rax, (%rdi)
235 ; AVX512-LABEL: test_constant_v16i8_align1:
237 ; AVX512-NEXT: movabsq $1084818905618843912, %rax # imm = 0xF0E0D0C0B0A0908
238 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
239 ; AVX512-NEXT: movabsq $506097522914230528, %rax # imm = 0x706050403020100
240 ; AVX512-NEXT: movntiq %rax, (%rdi)
242 store <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, ptr %dst, align 1, !nontemporal !1
248 define void @test_constant_v4f64_align1(ptr %dst) nounwind {
249 ; CHECK-LABEL: test_constant_v4f64_align1:
251 ; CHECK-NEXT: movabsq $-4616189618054758400, %rax # imm = 0xBFF0000000000000
252 ; CHECK-NEXT: movntiq %rax, 8(%rdi)
253 ; CHECK-NEXT: movabsq $-4611686018427387904, %rax # imm = 0xC000000000000000
254 ; CHECK-NEXT: movntiq %rax, (%rdi)
255 ; CHECK-NEXT: movabsq $4607182418800017408, %rax # imm = 0x3FF0000000000000
256 ; CHECK-NEXT: movntiq %rax, 24(%rdi)
257 ; CHECK-NEXT: xorl %eax, %eax
258 ; CHECK-NEXT: movntiq %rax, 16(%rdi)
260 store <4 x double> <double -2.0, double -1.0, double 0.0, double 1.0>, ptr %dst, align 1, !nontemporal !1
264 define void @test_constant_v8f32_align1(ptr %dst) nounwind {
265 ; SSE2-LABEL: test_constant_v8f32_align1:
267 ; SSE2-NEXT: movabsq $-4611686015214551040, %rax # imm = 0xC0000000BF800000
268 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
269 ; SSE2-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
270 ; SSE2-NEXT: movntiq %rax, (%rdi)
271 ; SSE2-NEXT: movabsq $-4557642819667230720, %rax # imm = 0xC0C00000C0A00000
272 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
273 ; SSE2-NEXT: movabsq $-4575657218183004160, %rax # imm = 0xC0800000C0400000
274 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
277 ; SSE4A-LABEL: test_constant_v8f32_align1:
279 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
280 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
281 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
282 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
283 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
284 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
285 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-5.1200036668777466E+2,0.0E+0]
286 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
289 ; SSE41-LABEL: test_constant_v8f32_align1:
291 ; SSE41-NEXT: movabsq $-4611686015214551040, %rax # imm = 0xC0000000BF800000
292 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
293 ; SSE41-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
294 ; SSE41-NEXT: movntiq %rax, (%rdi)
295 ; SSE41-NEXT: movabsq $-4557642819667230720, %rax # imm = 0xC0C00000C0A00000
296 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
297 ; SSE41-NEXT: movabsq $-4575657218183004160, %rax # imm = 0xC0800000C0400000
298 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
301 ; AVX-LABEL: test_constant_v8f32_align1:
303 ; AVX-NEXT: movabsq $-4611686015214551040, %rax # imm = 0xC0000000BF800000
304 ; AVX-NEXT: movntiq %rax, 8(%rdi)
305 ; AVX-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
306 ; AVX-NEXT: movntiq %rax, (%rdi)
307 ; AVX-NEXT: movabsq $-4557642819667230720, %rax # imm = 0xC0C00000C0A00000
308 ; AVX-NEXT: movntiq %rax, 24(%rdi)
309 ; AVX-NEXT: movabsq $-4575657218183004160, %rax # imm = 0xC0800000C0400000
310 ; AVX-NEXT: movntiq %rax, 16(%rdi)
313 ; AVX512-LABEL: test_constant_v8f32_align1:
315 ; AVX512-NEXT: movabsq $-4611686015214551040, %rax # imm = 0xC0000000BF800000
316 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
317 ; AVX512-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
318 ; AVX512-NEXT: movntiq %rax, (%rdi)
319 ; AVX512-NEXT: movabsq $-4557642819667230720, %rax # imm = 0xC0C00000C0A00000
320 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
321 ; AVX512-NEXT: movabsq $-4575657218183004160, %rax # imm = 0xC0800000C0400000
322 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
324 store <8 x float> <float 0.0, float -0.0, float -1.0, float -2.0, float -3.0, float -4.0, float -5.0, float -6.0>, ptr %dst, align 1, !nontemporal !1
328 define void @test_constant_v4i64_align1(ptr %dst) nounwind {
329 ; SSE2-LABEL: test_constant_v4i64_align1:
331 ; SSE2-NEXT: movq $-1, %rax
332 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
333 ; SSE2-NEXT: movq $-3, %rax
334 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
335 ; SSE2-NEXT: movq $-2, %rax
336 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
337 ; SSE2-NEXT: xorl %eax, %eax
338 ; SSE2-NEXT: movntiq %rax, (%rdi)
341 ; SSE4A-LABEL: test_constant_v4i64_align1:
343 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
344 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
345 ; SSE4A-NEXT: xorl %eax, %eax
346 ; SSE4A-NEXT: movntiq %rax, (%rdi)
347 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
348 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
349 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
350 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
353 ; SSE41-LABEL: test_constant_v4i64_align1:
355 ; SSE41-NEXT: movq $-1, %rax
356 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
357 ; SSE41-NEXT: movq $-3, %rax
358 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
359 ; SSE41-NEXT: movq $-2, %rax
360 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
361 ; SSE41-NEXT: xorl %eax, %eax
362 ; SSE41-NEXT: movntiq %rax, (%rdi)
365 ; AVX-LABEL: test_constant_v4i64_align1:
367 ; AVX-NEXT: movq $-1, %rax
368 ; AVX-NEXT: movntiq %rax, 8(%rdi)
369 ; AVX-NEXT: movq $-3, %rax
370 ; AVX-NEXT: movntiq %rax, 24(%rdi)
371 ; AVX-NEXT: movq $-2, %rax
372 ; AVX-NEXT: movntiq %rax, 16(%rdi)
373 ; AVX-NEXT: xorl %eax, %eax
374 ; AVX-NEXT: movntiq %rax, (%rdi)
377 ; AVX512-LABEL: test_constant_v4i64_align1:
379 ; AVX512-NEXT: movq $-1, %rax
380 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
381 ; AVX512-NEXT: movq $-3, %rax
382 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
383 ; AVX512-NEXT: movq $-2, %rax
384 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
385 ; AVX512-NEXT: xorl %eax, %eax
386 ; AVX512-NEXT: movntiq %rax, (%rdi)
388 store <4 x i64> <i64 0, i64 -1, i64 -2, i64 -3>, ptr %dst, align 1, !nontemporal !1
392 define void @test_constant_v8i32_align1(ptr %dst) nounwind {
393 ; SSE2-LABEL: test_constant_v8i32_align1:
395 ; SSE2-NEXT: movabsq $-8589934594, %rax # imm = 0xFFFFFFFDFFFFFFFE
396 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
397 ; SSE2-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
398 ; SSE2-NEXT: movntiq %rax, (%rdi)
399 ; SSE2-NEXT: movabsq $-25769803782, %rax # imm = 0xFFFFFFF9FFFFFFFA
400 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
401 ; SSE2-NEXT: movabsq $-17179869188, %rax # imm = 0xFFFFFFFBFFFFFFFC
402 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
405 ; SSE4A-LABEL: test_constant_v8i32_align1:
407 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
408 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
409 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
410 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
411 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
412 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
413 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
414 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
417 ; SSE41-LABEL: test_constant_v8i32_align1:
419 ; SSE41-NEXT: movabsq $-8589934594, %rax # imm = 0xFFFFFFFDFFFFFFFE
420 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
421 ; SSE41-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
422 ; SSE41-NEXT: movntiq %rax, (%rdi)
423 ; SSE41-NEXT: movabsq $-25769803782, %rax # imm = 0xFFFFFFF9FFFFFFFA
424 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
425 ; SSE41-NEXT: movabsq $-17179869188, %rax # imm = 0xFFFFFFFBFFFFFFFC
426 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
429 ; AVX-LABEL: test_constant_v8i32_align1:
431 ; AVX-NEXT: movabsq $-8589934594, %rax # imm = 0xFFFFFFFDFFFFFFFE
432 ; AVX-NEXT: movntiq %rax, 8(%rdi)
433 ; AVX-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
434 ; AVX-NEXT: movntiq %rax, (%rdi)
435 ; AVX-NEXT: movabsq $-25769803782, %rax # imm = 0xFFFFFFF9FFFFFFFA
436 ; AVX-NEXT: movntiq %rax, 24(%rdi)
437 ; AVX-NEXT: movabsq $-17179869188, %rax # imm = 0xFFFFFFFBFFFFFFFC
438 ; AVX-NEXT: movntiq %rax, 16(%rdi)
441 ; AVX512-LABEL: test_constant_v8i32_align1:
443 ; AVX512-NEXT: movabsq $-8589934594, %rax # imm = 0xFFFFFFFDFFFFFFFE
444 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
445 ; AVX512-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
446 ; AVX512-NEXT: movntiq %rax, (%rdi)
447 ; AVX512-NEXT: movabsq $-25769803782, %rax # imm = 0xFFFFFFF9FFFFFFFA
448 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
449 ; AVX512-NEXT: movabsq $-17179869188, %rax # imm = 0xFFFFFFFBFFFFFFFC
450 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
452 store <8 x i32> <i32 0, i32 -1, i32 -2, i32 -3, i32 -4, i32 -5, i32 -6, i32 -7>, ptr %dst, align 1, !nontemporal !1
456 define void @test_constant_v16i16_align1(ptr %dst) nounwind {
457 ; SSE2-LABEL: test_constant_v16i16_align1:
459 ; SSE2-NEXT: movabsq $-1688871335362564, %rax # imm = 0xFFF9FFFAFFFBFFFC
460 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
461 ; SSE2-NEXT: movabsq $-562954248454144, %rax # imm = 0xFFFDFFFEFFFF0000
462 ; SSE2-NEXT: movntiq %rax, (%rdi)
463 ; SSE2-NEXT: movabsq $-3940705509310476, %rax # imm = 0xFFF1FFF2FFF3FFF4
464 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
465 ; SSE2-NEXT: movabsq $-2814788422336520, %rax # imm = 0xFFF5FFF6FFF7FFF8
466 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
469 ; SSE4A-LABEL: test_constant_v16i16_align1:
471 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
472 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
473 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
474 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
475 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
476 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
477 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
478 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
481 ; SSE41-LABEL: test_constant_v16i16_align1:
483 ; SSE41-NEXT: movabsq $-1688871335362564, %rax # imm = 0xFFF9FFFAFFFBFFFC
484 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
485 ; SSE41-NEXT: movabsq $-562954248454144, %rax # imm = 0xFFFDFFFEFFFF0000
486 ; SSE41-NEXT: movntiq %rax, (%rdi)
487 ; SSE41-NEXT: movabsq $-3940705509310476, %rax # imm = 0xFFF1FFF2FFF3FFF4
488 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
489 ; SSE41-NEXT: movabsq $-2814788422336520, %rax # imm = 0xFFF5FFF6FFF7FFF8
490 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
493 ; AVX-LABEL: test_constant_v16i16_align1:
495 ; AVX-NEXT: movabsq $-1688871335362564, %rax # imm = 0xFFF9FFFAFFFBFFFC
496 ; AVX-NEXT: movntiq %rax, 8(%rdi)
497 ; AVX-NEXT: movabsq $-562954248454144, %rax # imm = 0xFFFDFFFEFFFF0000
498 ; AVX-NEXT: movntiq %rax, (%rdi)
499 ; AVX-NEXT: movabsq $-3940705509310476, %rax # imm = 0xFFF1FFF2FFF3FFF4
500 ; AVX-NEXT: movntiq %rax, 24(%rdi)
501 ; AVX-NEXT: movabsq $-2814788422336520, %rax # imm = 0xFFF5FFF6FFF7FFF8
502 ; AVX-NEXT: movntiq %rax, 16(%rdi)
505 ; AVX512-LABEL: test_constant_v16i16_align1:
507 ; AVX512-NEXT: movabsq $-1688871335362564, %rax # imm = 0xFFF9FFFAFFFBFFFC
508 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
509 ; AVX512-NEXT: movabsq $-562954248454144, %rax # imm = 0xFFFDFFFEFFFF0000
510 ; AVX512-NEXT: movntiq %rax, (%rdi)
511 ; AVX512-NEXT: movabsq $-3940705509310476, %rax # imm = 0xFFF1FFF2FFF3FFF4
512 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
513 ; AVX512-NEXT: movabsq $-2814788422336520, %rax # imm = 0xFFF5FFF6FFF7FFF8
514 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
516 store <16 x i16> <i16 0, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15>, ptr %dst, align 1, !nontemporal !1
520 define void @test_constant_v32i8_align1(ptr %dst) nounwind {
521 ; SSE2-LABEL: test_constant_v32i8_align1:
523 ; SSE2-NEXT: movabsq $-1012478732780767240, %rax # imm = 0xF1F2F3F4F5F6F7F8
524 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
525 ; SSE2-NEXT: movabsq $-433757350076154112, %rax # imm = 0xF9FAFBFCFDFEFF00
526 ; SSE2-NEXT: movntiq %rax, (%rdi)
527 ; SSE2-NEXT: movabsq $-2169921498189994008, %rax # imm = 0xE1E2E3E4E5E6E7E8
528 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
529 ; SSE2-NEXT: movabsq $-1591200115485380624, %rax # imm = 0xE9EAEBECEDEEEFF0
530 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
533 ; SSE4A-LABEL: test_constant_v32i8_align1:
535 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
536 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
537 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-3.826728214441238E+279,0.0E+0]
538 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
539 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
540 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
541 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-1.6485712323024388E+202,0.0E+0]
542 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
545 ; SSE41-LABEL: test_constant_v32i8_align1:
547 ; SSE41-NEXT: movabsq $-1012478732780767240, %rax # imm = 0xF1F2F3F4F5F6F7F8
548 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
549 ; SSE41-NEXT: movabsq $-433757350076154112, %rax # imm = 0xF9FAFBFCFDFEFF00
550 ; SSE41-NEXT: movntiq %rax, (%rdi)
551 ; SSE41-NEXT: movabsq $-2169921498189994008, %rax # imm = 0xE1E2E3E4E5E6E7E8
552 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
553 ; SSE41-NEXT: movabsq $-1591200115485380624, %rax # imm = 0xE9EAEBECEDEEEFF0
554 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
557 ; AVX-LABEL: test_constant_v32i8_align1:
559 ; AVX-NEXT: movabsq $-1012478732780767240, %rax # imm = 0xF1F2F3F4F5F6F7F8
560 ; AVX-NEXT: movntiq %rax, 8(%rdi)
561 ; AVX-NEXT: movabsq $-433757350076154112, %rax # imm = 0xF9FAFBFCFDFEFF00
562 ; AVX-NEXT: movntiq %rax, (%rdi)
563 ; AVX-NEXT: movabsq $-2169921498189994008, %rax # imm = 0xE1E2E3E4E5E6E7E8
564 ; AVX-NEXT: movntiq %rax, 24(%rdi)
565 ; AVX-NEXT: movabsq $-1591200115485380624, %rax # imm = 0xE9EAEBECEDEEEFF0
566 ; AVX-NEXT: movntiq %rax, 16(%rdi)
569 ; AVX512-LABEL: test_constant_v32i8_align1:
571 ; AVX512-NEXT: movabsq $-1012478732780767240, %rax # imm = 0xF1F2F3F4F5F6F7F8
572 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
573 ; AVX512-NEXT: movabsq $-433757350076154112, %rax # imm = 0xF9FAFBFCFDFEFF00
574 ; AVX512-NEXT: movntiq %rax, (%rdi)
575 ; AVX512-NEXT: movabsq $-2169921498189994008, %rax # imm = 0xE1E2E3E4E5E6E7E8
576 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
577 ; AVX512-NEXT: movabsq $-1591200115485380624, %rax # imm = 0xE9EAEBECEDEEEFF0
578 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
580 store <32 x i8> <i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -17, i8 -18, i8 -19, i8 -20, i8 -21, i8 -22, i8 -23, i8 -24, i8 -25, i8 -26, i8 -27, i8 -28, i8 -29, i8 -30, i8 -31>, ptr %dst, align 1, !nontemporal !1
584 define void @test_constant_v4f64_align16(ptr %dst) nounwind {
585 ; SSE-LABEL: test_constant_v4f64_align16:
587 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [-2.0E+0,-1.0E+0]
588 ; SSE-NEXT: movntps %xmm0, (%rdi)
589 ; SSE-NEXT: xorps %xmm0, %xmm0
590 ; SSE-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
591 ; SSE-NEXT: movntps %xmm0, 16(%rdi)
594 ; AVX-LABEL: test_constant_v4f64_align16:
596 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-2.0E+0,-1.0E+0]
597 ; AVX-NEXT: vmovntps %xmm0, (%rdi)
598 ; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
599 ; AVX-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
600 ; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
603 ; AVX512-LABEL: test_constant_v4f64_align16:
605 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-2.0E+0,-1.0E+0]
606 ; AVX512-NEXT: vmovntps %xmm0, (%rdi)
607 ; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
608 ; AVX512-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
609 ; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
611 store <4 x double> <double -2.0, double -1.0, double 0.0, double 1.0>, ptr %dst, align 16, !nontemporal !1
615 define void @test_constant_v8f32_align16(ptr %dst) nounwind {
616 ; SSE-LABEL: test_constant_v8f32_align16:
618 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.0E+0,-4.0E+0,-5.0E+0,-6.0E+0]
619 ; SSE-NEXT: movntps %xmm0, 16(%rdi)
620 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [0.0E+0,-0.0E+0,-1.0E+0,-2.0E+0]
621 ; SSE-NEXT: movntps %xmm0, (%rdi)
624 ; AVX-LABEL: test_constant_v8f32_align16:
626 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-3.0E+0,-4.0E+0,-5.0E+0,-6.0E+0]
627 ; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
628 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0.0E+0,-0.0E+0,-1.0E+0,-2.0E+0]
629 ; AVX-NEXT: vmovntps %xmm0, (%rdi)
632 ; AVX512-LABEL: test_constant_v8f32_align16:
634 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-3.0E+0,-4.0E+0,-5.0E+0,-6.0E+0]
635 ; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
636 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0.0E+0,-0.0E+0,-1.0E+0,-2.0E+0]
637 ; AVX512-NEXT: vmovntps %xmm0, (%rdi)
639 store <8 x float> <float 0.0, float -0.0, float -1.0, float -2.0, float -3.0, float -4.0, float -5.0, float -6.0>, ptr %dst, align 16, !nontemporal !1
643 define void @test_constant_v4i64_align16(ptr %dst) nounwind {
644 ; SSE-LABEL: test_constant_v4i64_align16:
646 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551614,18446744073709551613]
647 ; SSE-NEXT: movntps %xmm0, 16(%rdi)
648 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
649 ; SSE-NEXT: movntps %xmm0, (%rdi)
652 ; AVX-LABEL: test_constant_v4i64_align16:
654 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551614,18446744073709551613]
655 ; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
656 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
657 ; AVX-NEXT: vmovntps %xmm0, (%rdi)
660 ; AVX512-LABEL: test_constant_v4i64_align16:
662 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551614,18446744073709551613]
663 ; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
664 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
665 ; AVX512-NEXT: vmovntps %xmm0, (%rdi)
667 store <4 x i64> <i64 0, i64 -1, i64 -2, i64 -3>, ptr %dst, align 16, !nontemporal !1
671 define void @test_constant_v8i32_align16(ptr %dst) nounwind {
672 ; SSE-LABEL: test_constant_v8i32_align16:
674 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967292,4294967291,4294967290,4294967289]
675 ; SSE-NEXT: movntps %xmm0, 16(%rdi)
676 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,4294967295,4294967294,4294967293]
677 ; SSE-NEXT: movntps %xmm0, (%rdi)
680 ; AVX-LABEL: test_constant_v8i32_align16:
682 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967292,4294967291,4294967290,4294967289]
683 ; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
684 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,4294967294,4294967293]
685 ; AVX-NEXT: vmovntps %xmm0, (%rdi)
688 ; AVX512-LABEL: test_constant_v8i32_align16:
690 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [4294967292,4294967291,4294967290,4294967289]
691 ; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
692 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,4294967294,4294967293]
693 ; AVX512-NEXT: vmovntps %xmm0, (%rdi)
695 store <8 x i32> <i32 0, i32 -1, i32 -2, i32 -3, i32 -4, i32 -5, i32 -6, i32 -7>, ptr %dst, align 16, !nontemporal !1
699 define void @test_constant_v16i16_align16(ptr %dst) nounwind {
700 ; SSE-LABEL: test_constant_v16i16_align16:
702 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [65528,65527,65526,65525,65524,65523,65522,65521]
703 ; SSE-NEXT: movntps %xmm0, 16(%rdi)
704 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,65535,65534,65533,65532,65531,65530,65529]
705 ; SSE-NEXT: movntps %xmm0, (%rdi)
708 ; AVX-LABEL: test_constant_v16i16_align16:
710 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65528,65527,65526,65525,65524,65523,65522,65521]
711 ; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
712 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,65535,65534,65533,65532,65531,65530,65529]
713 ; AVX-NEXT: vmovntps %xmm0, (%rdi)
716 ; AVX512-LABEL: test_constant_v16i16_align16:
718 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [65528,65527,65526,65525,65524,65523,65522,65521]
719 ; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
720 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,65535,65534,65533,65532,65531,65530,65529]
721 ; AVX512-NEXT: vmovntps %xmm0, (%rdi)
723 store <16 x i16> <i16 0, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15>, ptr %dst, align 16, !nontemporal !1
727 define void @test_constant_v32i8_align16(ptr %dst) nounwind {
728 ; SSE-LABEL: test_constant_v32i8_align16:
730 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [240,239,238,237,236,235,234,233,232,231,230,229,228,227,226,225]
731 ; SSE-NEXT: movntps %xmm0, 16(%rdi)
732 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
733 ; SSE-NEXT: movntps %xmm0, (%rdi)
736 ; AVX-LABEL: test_constant_v32i8_align16:
738 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [240,239,238,237,236,235,234,233,232,231,230,229,228,227,226,225]
739 ; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
740 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
741 ; AVX-NEXT: vmovntps %xmm0, (%rdi)
744 ; AVX512-LABEL: test_constant_v32i8_align16:
746 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [240,239,238,237,236,235,234,233,232,231,230,229,228,227,226,225]
747 ; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
748 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
749 ; AVX512-NEXT: vmovntps %xmm0, (%rdi)
751 store <32 x i8> <i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -17, i8 -18, i8 -19, i8 -20, i8 -21, i8 -22, i8 -23, i8 -24, i8 -25, i8 -26, i8 -27, i8 -28, i8 -29, i8 -30, i8 -31>, ptr %dst, align 16, !nontemporal !1
757 define void @test_constant_v8f64_align1(ptr %dst) nounwind {
758 ; CHECK-LABEL: test_constant_v8f64_align1:
760 ; CHECK-NEXT: movabsq $-4616189618054758400, %rax # imm = 0xBFF0000000000000
761 ; CHECK-NEXT: movntiq %rax, 8(%rdi)
762 ; CHECK-NEXT: movabsq $-4611686018427387904, %rax # imm = 0xC000000000000000
763 ; CHECK-NEXT: movntiq %rax, (%rdi)
764 ; CHECK-NEXT: movabsq $4607182418800017408, %rax # imm = 0x3FF0000000000000
765 ; CHECK-NEXT: movntiq %rax, 24(%rdi)
766 ; CHECK-NEXT: movabsq $4613937818241073152, %rax # imm = 0x4008000000000000
767 ; CHECK-NEXT: movntiq %rax, 40(%rdi)
768 ; CHECK-NEXT: movabsq $4611686018427387904, %rax # imm = 0x4000000000000000
769 ; CHECK-NEXT: movntiq %rax, 32(%rdi)
770 ; CHECK-NEXT: movabsq $4617315517961601024, %rax # imm = 0x4014000000000000
771 ; CHECK-NEXT: movntiq %rax, 56(%rdi)
772 ; CHECK-NEXT: movabsq $4616189618054758400, %rax # imm = 0x4010000000000000
773 ; CHECK-NEXT: movntiq %rax, 48(%rdi)
774 ; CHECK-NEXT: xorl %eax, %eax
775 ; CHECK-NEXT: movntiq %rax, 16(%rdi)
777 store <8 x double> <double -2.0, double -1.0, double 0.0, double 1.0, double 2.0, double 3.0, double 4.0, double 5.0>, ptr %dst, align 1, !nontemporal !1
781 define void @test_constant_v16f32_align1(ptr %dst) nounwind {
782 ; SSE2-LABEL: test_constant_v16f32_align1:
784 ; SSE2-NEXT: movabsq $-4611686015214551040, %rax # imm = 0xC0000000BF800000
785 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
786 ; SSE2-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
787 ; SSE2-NEXT: movntiq %rax, (%rdi)
788 ; SSE2-NEXT: movabsq $-4557642819667230720, %rax # imm = 0xC0C00000C0A00000
789 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
790 ; SSE2-NEXT: movabsq $-4575657218183004160, %rax # imm = 0xC0800000C0400000
791 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
792 ; SSE2-NEXT: movabsq $-4530621221895667712, %rax # imm = 0xC1200000C1100000
793 ; SSE2-NEXT: movntiq %rax, 40(%rdi)
794 ; SSE2-NEXT: movabsq $-4539628421153554432, %rax # imm = 0xC1000000C0E00000
795 ; SSE2-NEXT: movntiq %rax, 32(%rdi)
796 ; SSE2-NEXT: movabsq $-4512606823381991424, %rax # imm = 0xC1600000C1500000
797 ; SSE2-NEXT: movntiq %rax, 56(%rdi)
798 ; SSE2-NEXT: movabsq $-4521614022638829568, %rax # imm = 0xC1400000C1300000
799 ; SSE2-NEXT: movntiq %rax, 48(%rdi)
802 ; SSE4A-LABEL: test_constant_v16f32_align1:
804 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
805 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
806 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
807 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
808 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
809 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
810 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-5.1200036668777466E+2,0.0E+0]
811 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
812 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
813 ; SSE4A-NEXT: movntsd %xmm0, 40(%rdi)
814 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-1.3107209417724609E+5,0.0E+0]
815 ; SSE4A-NEXT: movntsd %xmm0, 32(%rdi)
816 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
817 ; SSE4A-NEXT: movntsd %xmm0, 56(%rdi)
818 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-2.0971535092773438E+6,0.0E+0]
819 ; SSE4A-NEXT: movntsd %xmm0, 48(%rdi)
822 ; SSE41-LABEL: test_constant_v16f32_align1:
824 ; SSE41-NEXT: movabsq $-4611686015214551040, %rax # imm = 0xC0000000BF800000
825 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
826 ; SSE41-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
827 ; SSE41-NEXT: movntiq %rax, (%rdi)
828 ; SSE41-NEXT: movabsq $-4557642819667230720, %rax # imm = 0xC0C00000C0A00000
829 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
830 ; SSE41-NEXT: movabsq $-4575657218183004160, %rax # imm = 0xC0800000C0400000
831 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
832 ; SSE41-NEXT: movabsq $-4530621221895667712, %rax # imm = 0xC1200000C1100000
833 ; SSE41-NEXT: movntiq %rax, 40(%rdi)
834 ; SSE41-NEXT: movabsq $-4539628421153554432, %rax # imm = 0xC1000000C0E00000
835 ; SSE41-NEXT: movntiq %rax, 32(%rdi)
836 ; SSE41-NEXT: movabsq $-4512606823381991424, %rax # imm = 0xC1600000C1500000
837 ; SSE41-NEXT: movntiq %rax, 56(%rdi)
838 ; SSE41-NEXT: movabsq $-4521614022638829568, %rax # imm = 0xC1400000C1300000
839 ; SSE41-NEXT: movntiq %rax, 48(%rdi)
842 ; AVX-LABEL: test_constant_v16f32_align1:
844 ; AVX-NEXT: movabsq $-4611686015214551040, %rax # imm = 0xC0000000BF800000
845 ; AVX-NEXT: movntiq %rax, 8(%rdi)
846 ; AVX-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
847 ; AVX-NEXT: movntiq %rax, (%rdi)
848 ; AVX-NEXT: movabsq $-4557642819667230720, %rax # imm = 0xC0C00000C0A00000
849 ; AVX-NEXT: movntiq %rax, 24(%rdi)
850 ; AVX-NEXT: movabsq $-4575657218183004160, %rax # imm = 0xC0800000C0400000
851 ; AVX-NEXT: movntiq %rax, 16(%rdi)
852 ; AVX-NEXT: movabsq $-4530621221895667712, %rax # imm = 0xC1200000C1100000
853 ; AVX-NEXT: movntiq %rax, 40(%rdi)
854 ; AVX-NEXT: movabsq $-4539628421153554432, %rax # imm = 0xC1000000C0E00000
855 ; AVX-NEXT: movntiq %rax, 32(%rdi)
856 ; AVX-NEXT: movabsq $-4512606823381991424, %rax # imm = 0xC1600000C1500000
857 ; AVX-NEXT: movntiq %rax, 56(%rdi)
858 ; AVX-NEXT: movabsq $-4521614022638829568, %rax # imm = 0xC1400000C1300000
859 ; AVX-NEXT: movntiq %rax, 48(%rdi)
862 ; AVX512-LABEL: test_constant_v16f32_align1:
864 ; AVX512-NEXT: movabsq $-4611686015214551040, %rax # imm = 0xC0000000BF800000
865 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
866 ; AVX512-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
867 ; AVX512-NEXT: movntiq %rax, (%rdi)
868 ; AVX512-NEXT: movabsq $-4557642819667230720, %rax # imm = 0xC0C00000C0A00000
869 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
870 ; AVX512-NEXT: movabsq $-4575657218183004160, %rax # imm = 0xC0800000C0400000
871 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
872 ; AVX512-NEXT: movabsq $-4530621221895667712, %rax # imm = 0xC1200000C1100000
873 ; AVX512-NEXT: movntiq %rax, 40(%rdi)
874 ; AVX512-NEXT: movabsq $-4539628421153554432, %rax # imm = 0xC1000000C0E00000
875 ; AVX512-NEXT: movntiq %rax, 32(%rdi)
876 ; AVX512-NEXT: movabsq $-4512606823381991424, %rax # imm = 0xC1600000C1500000
877 ; AVX512-NEXT: movntiq %rax, 56(%rdi)
878 ; AVX512-NEXT: movabsq $-4521614022638829568, %rax # imm = 0xC1400000C1300000
879 ; AVX512-NEXT: movntiq %rax, 48(%rdi)
881 store <16 x float> <float 0.0, float -0.0, float -1.0, float -2.0, float -3.0, float -4.0, float -5.0, float -6.0, float -7.0, float -8.0, float -9.0, float -10.0, float -11.0, float -12.0, float -13.0, float -14.0>, ptr %dst, align 1, !nontemporal !1
885 define void @test_constant_v8i64_align1(ptr %dst) nounwind {
886 ; SSE2-LABEL: test_constant_v8i64_align1:
888 ; SSE2-NEXT: movq $-1, %rax
889 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
890 ; SSE2-NEXT: movq $-3, %rax
891 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
892 ; SSE2-NEXT: movq $-2, %rax
893 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
894 ; SSE2-NEXT: movq $-5, %rax
895 ; SSE2-NEXT: movntiq %rax, 40(%rdi)
896 ; SSE2-NEXT: movq $-4, %rax
897 ; SSE2-NEXT: movntiq %rax, 32(%rdi)
898 ; SSE2-NEXT: movq $-7, %rax
899 ; SSE2-NEXT: movntiq %rax, 56(%rdi)
900 ; SSE2-NEXT: movq $-6, %rax
901 ; SSE2-NEXT: movntiq %rax, 48(%rdi)
902 ; SSE2-NEXT: xorl %eax, %eax
903 ; SSE2-NEXT: movntiq %rax, (%rdi)
906 ; SSE4A-LABEL: test_constant_v8i64_align1:
908 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
909 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
910 ; SSE4A-NEXT: xorl %eax, %eax
911 ; SSE4A-NEXT: movntiq %rax, (%rdi)
912 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
913 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
914 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
915 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
916 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
917 ; SSE4A-NEXT: movntsd %xmm0, 40(%rdi)
918 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
919 ; SSE4A-NEXT: movntsd %xmm0, 32(%rdi)
920 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
921 ; SSE4A-NEXT: movntsd %xmm0, 56(%rdi)
922 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
923 ; SSE4A-NEXT: movntsd %xmm0, 48(%rdi)
926 ; SSE41-LABEL: test_constant_v8i64_align1:
928 ; SSE41-NEXT: movq $-1, %rax
929 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
930 ; SSE41-NEXT: movq $-3, %rax
931 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
932 ; SSE41-NEXT: movq $-2, %rax
933 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
934 ; SSE41-NEXT: movq $-5, %rax
935 ; SSE41-NEXT: movntiq %rax, 40(%rdi)
936 ; SSE41-NEXT: movq $-4, %rax
937 ; SSE41-NEXT: movntiq %rax, 32(%rdi)
938 ; SSE41-NEXT: movq $-7, %rax
939 ; SSE41-NEXT: movntiq %rax, 56(%rdi)
940 ; SSE41-NEXT: movq $-6, %rax
941 ; SSE41-NEXT: movntiq %rax, 48(%rdi)
942 ; SSE41-NEXT: xorl %eax, %eax
943 ; SSE41-NEXT: movntiq %rax, (%rdi)
946 ; AVX-LABEL: test_constant_v8i64_align1:
948 ; AVX-NEXT: movq $-1, %rax
949 ; AVX-NEXT: movntiq %rax, 8(%rdi)
950 ; AVX-NEXT: movq $-3, %rax
951 ; AVX-NEXT: movntiq %rax, 24(%rdi)
952 ; AVX-NEXT: movq $-2, %rax
953 ; AVX-NEXT: movntiq %rax, 16(%rdi)
954 ; AVX-NEXT: movq $-5, %rax
955 ; AVX-NEXT: movntiq %rax, 40(%rdi)
956 ; AVX-NEXT: movq $-4, %rax
957 ; AVX-NEXT: movntiq %rax, 32(%rdi)
958 ; AVX-NEXT: movq $-7, %rax
959 ; AVX-NEXT: movntiq %rax, 56(%rdi)
960 ; AVX-NEXT: movq $-6, %rax
961 ; AVX-NEXT: movntiq %rax, 48(%rdi)
962 ; AVX-NEXT: xorl %eax, %eax
963 ; AVX-NEXT: movntiq %rax, (%rdi)
966 ; AVX512-LABEL: test_constant_v8i64_align1:
968 ; AVX512-NEXT: movq $-1, %rax
969 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
970 ; AVX512-NEXT: movq $-3, %rax
971 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
972 ; AVX512-NEXT: movq $-2, %rax
973 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
974 ; AVX512-NEXT: movq $-5, %rax
975 ; AVX512-NEXT: movntiq %rax, 40(%rdi)
976 ; AVX512-NEXT: movq $-4, %rax
977 ; AVX512-NEXT: movntiq %rax, 32(%rdi)
978 ; AVX512-NEXT: movq $-7, %rax
979 ; AVX512-NEXT: movntiq %rax, 56(%rdi)
980 ; AVX512-NEXT: movq $-6, %rax
981 ; AVX512-NEXT: movntiq %rax, 48(%rdi)
982 ; AVX512-NEXT: xorl %eax, %eax
983 ; AVX512-NEXT: movntiq %rax, (%rdi)
985 store <8 x i64> <i64 0, i64 -1, i64 -2, i64 -3, i64 -4, i64 -5, i64 -6, i64 -7>, ptr %dst, align 1, !nontemporal !1
989 define void @test_constant_v16i32_align1(ptr %dst) nounwind {
990 ; SSE2-LABEL: test_constant_v16i32_align1:
992 ; SSE2-NEXT: movabsq $-8589934594, %rax # imm = 0xFFFFFFFDFFFFFFFE
993 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
994 ; SSE2-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
995 ; SSE2-NEXT: movntiq %rax, (%rdi)
996 ; SSE2-NEXT: movabsq $-25769803782, %rax # imm = 0xFFFFFFF9FFFFFFFA
997 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
998 ; SSE2-NEXT: movabsq $-17179869188, %rax # imm = 0xFFFFFFFBFFFFFFFC
999 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
1000 ; SSE2-NEXT: movabsq $-42949672970, %rax # imm = 0xFFFFFFF5FFFFFFF6
1001 ; SSE2-NEXT: movntiq %rax, 40(%rdi)
1002 ; SSE2-NEXT: movabsq $-34359738376, %rax # imm = 0xFFFFFFF7FFFFFFF8
1003 ; SSE2-NEXT: movntiq %rax, 32(%rdi)
1004 ; SSE2-NEXT: movabsq $-60129542158, %rax # imm = 0xFFFFFFF1FFFFFFF2
1005 ; SSE2-NEXT: movntiq %rax, 56(%rdi)
1006 ; SSE2-NEXT: movabsq $-51539607564, %rax # imm = 0xFFFFFFF3FFFFFFF4
1007 ; SSE2-NEXT: movntiq %rax, 48(%rdi)
1010 ; SSE4A-LABEL: test_constant_v16i32_align1:
1012 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1013 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
1014 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
1015 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
1016 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1017 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
1018 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
1019 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
1020 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1021 ; SSE4A-NEXT: movntsd %xmm0, 40(%rdi)
1022 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
1023 ; SSE4A-NEXT: movntsd %xmm0, 32(%rdi)
1024 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1025 ; SSE4A-NEXT: movntsd %xmm0, 56(%rdi)
1026 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
1027 ; SSE4A-NEXT: movntsd %xmm0, 48(%rdi)
1030 ; SSE41-LABEL: test_constant_v16i32_align1:
1032 ; SSE41-NEXT: movabsq $-8589934594, %rax # imm = 0xFFFFFFFDFFFFFFFE
1033 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
1034 ; SSE41-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
1035 ; SSE41-NEXT: movntiq %rax, (%rdi)
1036 ; SSE41-NEXT: movabsq $-25769803782, %rax # imm = 0xFFFFFFF9FFFFFFFA
1037 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
1038 ; SSE41-NEXT: movabsq $-17179869188, %rax # imm = 0xFFFFFFFBFFFFFFFC
1039 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
1040 ; SSE41-NEXT: movabsq $-42949672970, %rax # imm = 0xFFFFFFF5FFFFFFF6
1041 ; SSE41-NEXT: movntiq %rax, 40(%rdi)
1042 ; SSE41-NEXT: movabsq $-34359738376, %rax # imm = 0xFFFFFFF7FFFFFFF8
1043 ; SSE41-NEXT: movntiq %rax, 32(%rdi)
1044 ; SSE41-NEXT: movabsq $-60129542158, %rax # imm = 0xFFFFFFF1FFFFFFF2
1045 ; SSE41-NEXT: movntiq %rax, 56(%rdi)
1046 ; SSE41-NEXT: movabsq $-51539607564, %rax # imm = 0xFFFFFFF3FFFFFFF4
1047 ; SSE41-NEXT: movntiq %rax, 48(%rdi)
1050 ; AVX-LABEL: test_constant_v16i32_align1:
1052 ; AVX-NEXT: movabsq $-8589934594, %rax # imm = 0xFFFFFFFDFFFFFFFE
1053 ; AVX-NEXT: movntiq %rax, 8(%rdi)
1054 ; AVX-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
1055 ; AVX-NEXT: movntiq %rax, (%rdi)
1056 ; AVX-NEXT: movabsq $-25769803782, %rax # imm = 0xFFFFFFF9FFFFFFFA
1057 ; AVX-NEXT: movntiq %rax, 24(%rdi)
1058 ; AVX-NEXT: movabsq $-17179869188, %rax # imm = 0xFFFFFFFBFFFFFFFC
1059 ; AVX-NEXT: movntiq %rax, 16(%rdi)
1060 ; AVX-NEXT: movabsq $-42949672970, %rax # imm = 0xFFFFFFF5FFFFFFF6
1061 ; AVX-NEXT: movntiq %rax, 40(%rdi)
1062 ; AVX-NEXT: movabsq $-34359738376, %rax # imm = 0xFFFFFFF7FFFFFFF8
1063 ; AVX-NEXT: movntiq %rax, 32(%rdi)
1064 ; AVX-NEXT: movabsq $-60129542158, %rax # imm = 0xFFFFFFF1FFFFFFF2
1065 ; AVX-NEXT: movntiq %rax, 56(%rdi)
1066 ; AVX-NEXT: movabsq $-51539607564, %rax # imm = 0xFFFFFFF3FFFFFFF4
1067 ; AVX-NEXT: movntiq %rax, 48(%rdi)
1070 ; AVX512-LABEL: test_constant_v16i32_align1:
1072 ; AVX512-NEXT: movabsq $-8589934594, %rax # imm = 0xFFFFFFFDFFFFFFFE
1073 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
1074 ; AVX512-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
1075 ; AVX512-NEXT: movntiq %rax, (%rdi)
1076 ; AVX512-NEXT: movabsq $-25769803782, %rax # imm = 0xFFFFFFF9FFFFFFFA
1077 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
1078 ; AVX512-NEXT: movabsq $-17179869188, %rax # imm = 0xFFFFFFFBFFFFFFFC
1079 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
1080 ; AVX512-NEXT: movabsq $-42949672970, %rax # imm = 0xFFFFFFF5FFFFFFF6
1081 ; AVX512-NEXT: movntiq %rax, 40(%rdi)
1082 ; AVX512-NEXT: movabsq $-34359738376, %rax # imm = 0xFFFFFFF7FFFFFFF8
1083 ; AVX512-NEXT: movntiq %rax, 32(%rdi)
1084 ; AVX512-NEXT: movabsq $-60129542158, %rax # imm = 0xFFFFFFF1FFFFFFF2
1085 ; AVX512-NEXT: movntiq %rax, 56(%rdi)
1086 ; AVX512-NEXT: movabsq $-51539607564, %rax # imm = 0xFFFFFFF3FFFFFFF4
1087 ; AVX512-NEXT: movntiq %rax, 48(%rdi)
1089 store <16 x i32> <i32 0, i32 -1, i32 -2, i32 -3, i32 -4, i32 -5, i32 -6, i32 -7, i32 -8, i32 -9, i32 -10, i32 -11, i32 -12, i32 -13, i32 -14, i32 -15>, ptr %dst, align 1, !nontemporal !1
1093 define void @test_constant_v32i16_align1(ptr %dst) nounwind {
1094 ; SSE2-LABEL: test_constant_v32i16_align1:
1096 ; SSE2-NEXT: movabsq $-1688871335362564, %rax # imm = 0xFFF9FFFAFFFBFFFC
1097 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
1098 ; SSE2-NEXT: movabsq $-562954248454144, %rax # imm = 0xFFFDFFFEFFFF0000
1099 ; SSE2-NEXT: movntiq %rax, (%rdi)
1100 ; SSE2-NEXT: movabsq $-3940705509310476, %rax # imm = 0xFFF1FFF2FFF3FFF4
1101 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
1102 ; SSE2-NEXT: movabsq $-2814788422336520, %rax # imm = 0xFFF5FFF6FFF7FFF8
1103 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
1104 ; SSE2-NEXT: movabsq $-6192539683258388, %rax # imm = 0xFFE9FFEAFFEBFFEC
1105 ; SSE2-NEXT: movntiq %rax, 40(%rdi)
1106 ; SSE2-NEXT: movabsq $-5066622596284432, %rax # imm = 0xFFEDFFEEFFEFFFF0
1107 ; SSE2-NEXT: movntiq %rax, 32(%rdi)
1108 ; SSE2-NEXT: movabsq $-8444373857206300, %rax # imm = 0xFFE1FFE2FFE3FFE4
1109 ; SSE2-NEXT: movntiq %rax, 56(%rdi)
1110 ; SSE2-NEXT: movabsq $-7318456770232344, %rax # imm = 0xFFE5FFE6FFE7FFE8
1111 ; SSE2-NEXT: movntiq %rax, 48(%rdi)
1114 ; SSE4A-LABEL: test_constant_v32i16_align1:
1116 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1117 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
1118 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
1119 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
1120 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1121 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
1122 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
1123 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
1124 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1125 ; SSE4A-NEXT: movntsd %xmm0, 40(%rdi)
1126 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-1.6853227412070812E+308,0.0E+0]
1127 ; SSE4A-NEXT: movntsd %xmm0, 32(%rdi)
1128 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1129 ; SSE4A-NEXT: movntsd %xmm0, 56(%rdi)
1130 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-1.2358925997317751E+308,0.0E+0]
1131 ; SSE4A-NEXT: movntsd %xmm0, 48(%rdi)
1134 ; SSE41-LABEL: test_constant_v32i16_align1:
1136 ; SSE41-NEXT: movabsq $-1688871335362564, %rax # imm = 0xFFF9FFFAFFFBFFFC
1137 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
1138 ; SSE41-NEXT: movabsq $-562954248454144, %rax # imm = 0xFFFDFFFEFFFF0000
1139 ; SSE41-NEXT: movntiq %rax, (%rdi)
1140 ; SSE41-NEXT: movabsq $-3940705509310476, %rax # imm = 0xFFF1FFF2FFF3FFF4
1141 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
1142 ; SSE41-NEXT: movabsq $-2814788422336520, %rax # imm = 0xFFF5FFF6FFF7FFF8
1143 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
1144 ; SSE41-NEXT: movabsq $-6192539683258388, %rax # imm = 0xFFE9FFEAFFEBFFEC
1145 ; SSE41-NEXT: movntiq %rax, 40(%rdi)
1146 ; SSE41-NEXT: movabsq $-5066622596284432, %rax # imm = 0xFFEDFFEEFFEFFFF0
1147 ; SSE41-NEXT: movntiq %rax, 32(%rdi)
1148 ; SSE41-NEXT: movabsq $-8444373857206300, %rax # imm = 0xFFE1FFE2FFE3FFE4
1149 ; SSE41-NEXT: movntiq %rax, 56(%rdi)
1150 ; SSE41-NEXT: movabsq $-7318456770232344, %rax # imm = 0xFFE5FFE6FFE7FFE8
1151 ; SSE41-NEXT: movntiq %rax, 48(%rdi)
1154 ; AVX-LABEL: test_constant_v32i16_align1:
1156 ; AVX-NEXT: movabsq $-1688871335362564, %rax # imm = 0xFFF9FFFAFFFBFFFC
1157 ; AVX-NEXT: movntiq %rax, 8(%rdi)
1158 ; AVX-NEXT: movabsq $-562954248454144, %rax # imm = 0xFFFDFFFEFFFF0000
1159 ; AVX-NEXT: movntiq %rax, (%rdi)
1160 ; AVX-NEXT: movabsq $-3940705509310476, %rax # imm = 0xFFF1FFF2FFF3FFF4
1161 ; AVX-NEXT: movntiq %rax, 24(%rdi)
1162 ; AVX-NEXT: movabsq $-2814788422336520, %rax # imm = 0xFFF5FFF6FFF7FFF8
1163 ; AVX-NEXT: movntiq %rax, 16(%rdi)
1164 ; AVX-NEXT: movabsq $-6192539683258388, %rax # imm = 0xFFE9FFEAFFEBFFEC
1165 ; AVX-NEXT: movntiq %rax, 40(%rdi)
1166 ; AVX-NEXT: movabsq $-5066622596284432, %rax # imm = 0xFFEDFFEEFFEFFFF0
1167 ; AVX-NEXT: movntiq %rax, 32(%rdi)
1168 ; AVX-NEXT: movabsq $-8444373857206300, %rax # imm = 0xFFE1FFE2FFE3FFE4
1169 ; AVX-NEXT: movntiq %rax, 56(%rdi)
1170 ; AVX-NEXT: movabsq $-7318456770232344, %rax # imm = 0xFFE5FFE6FFE7FFE8
1171 ; AVX-NEXT: movntiq %rax, 48(%rdi)
1174 ; AVX512-LABEL: test_constant_v32i16_align1:
1176 ; AVX512-NEXT: movabsq $-1688871335362564, %rax # imm = 0xFFF9FFFAFFFBFFFC
1177 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
1178 ; AVX512-NEXT: movabsq $-562954248454144, %rax # imm = 0xFFFDFFFEFFFF0000
1179 ; AVX512-NEXT: movntiq %rax, (%rdi)
1180 ; AVX512-NEXT: movabsq $-3940705509310476, %rax # imm = 0xFFF1FFF2FFF3FFF4
1181 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
1182 ; AVX512-NEXT: movabsq $-2814788422336520, %rax # imm = 0xFFF5FFF6FFF7FFF8
1183 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
1184 ; AVX512-NEXT: movabsq $-6192539683258388, %rax # imm = 0xFFE9FFEAFFEBFFEC
1185 ; AVX512-NEXT: movntiq %rax, 40(%rdi)
1186 ; AVX512-NEXT: movabsq $-5066622596284432, %rax # imm = 0xFFEDFFEEFFEFFFF0
1187 ; AVX512-NEXT: movntiq %rax, 32(%rdi)
1188 ; AVX512-NEXT: movabsq $-8444373857206300, %rax # imm = 0xFFE1FFE2FFE3FFE4
1189 ; AVX512-NEXT: movntiq %rax, 56(%rdi)
1190 ; AVX512-NEXT: movabsq $-7318456770232344, %rax # imm = 0xFFE5FFE6FFE7FFE8
1191 ; AVX512-NEXT: movntiq %rax, 48(%rdi)
1193 store <32 x i16> <i16 0, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 -17, i16 -18, i16 -19, i16 -20, i16 -21, i16 -22, i16 -23, i16 -24, i16 -25, i16 -26, i16 -27, i16 -28, i16 -29, i16 -30, i16 -31>, ptr %dst, align 1, !nontemporal !1
1197 define void @test_constant_v64i8_align1(ptr %dst) nounwind {
1198 ; SSE2-LABEL: test_constant_v64i8_align1:
1200 ; SSE2-NEXT: movabsq $-1012478732780767240, %rax # imm = 0xF1F2F3F4F5F6F7F8
1201 ; SSE2-NEXT: movntiq %rax, 8(%rdi)
1202 ; SSE2-NEXT: movabsq $-433757350076154112, %rax # imm = 0xF9FAFBFCFDFEFF00
1203 ; SSE2-NEXT: movntiq %rax, (%rdi)
1204 ; SSE2-NEXT: movabsq $-2169921498189994008, %rax # imm = 0xE1E2E3E4E5E6E7E8
1205 ; SSE2-NEXT: movntiq %rax, 24(%rdi)
1206 ; SSE2-NEXT: movabsq $-1591200115485380624, %rax # imm = 0xE9EAEBECEDEEEFF0
1207 ; SSE2-NEXT: movntiq %rax, 16(%rdi)
1208 ; SSE2-NEXT: movabsq $-3327364263599220776, %rax # imm = 0xD1D2D3D4D5D6D7D8
1209 ; SSE2-NEXT: movntiq %rax, 40(%rdi)
1210 ; SSE2-NEXT: movabsq $-2748642880894607392, %rax # imm = 0xD9DADBDCDDDEDFE0
1211 ; SSE2-NEXT: movntiq %rax, 32(%rdi)
1212 ; SSE2-NEXT: movabsq $-4484807029008447544, %rax # imm = 0xC1C2C3C4C5C6C7C8
1213 ; SSE2-NEXT: movntiq %rax, 56(%rdi)
1214 ; SSE2-NEXT: movabsq $-3906085646303834160, %rax # imm = 0xC9CACBCCCDCECFD0
1215 ; SSE2-NEXT: movntiq %rax, 48(%rdi)
1218 ; SSE4A-LABEL: test_constant_v64i8_align1:
1220 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1221 ; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
1222 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-3.826728214441238E+279,0.0E+0]
1223 ; SSE4A-NEXT: movntsd %xmm0, (%rdi)
1224 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1225 ; SSE4A-NEXT: movntsd %xmm0, 24(%rdi)
1226 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-1.6485712323024388E+202,0.0E+0]
1227 ; SSE4A-NEXT: movntsd %xmm0, 16(%rdi)
1228 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1229 ; SSE4A-NEXT: movntsd %xmm0, 40(%rdi)
1230 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-7.1020783099933495E+124,0.0E+0]
1231 ; SSE4A-NEXT: movntsd %xmm0, 32(%rdi)
1232 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1233 ; SSE4A-NEXT: movntsd %xmm0, 56(%rdi)
1234 ; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [-3.0595730451167367E+47,0.0E+0]
1235 ; SSE4A-NEXT: movntsd %xmm0, 48(%rdi)
1238 ; SSE41-LABEL: test_constant_v64i8_align1:
1240 ; SSE41-NEXT: movabsq $-1012478732780767240, %rax # imm = 0xF1F2F3F4F5F6F7F8
1241 ; SSE41-NEXT: movntiq %rax, 8(%rdi)
1242 ; SSE41-NEXT: movabsq $-433757350076154112, %rax # imm = 0xF9FAFBFCFDFEFF00
1243 ; SSE41-NEXT: movntiq %rax, (%rdi)
1244 ; SSE41-NEXT: movabsq $-2169921498189994008, %rax # imm = 0xE1E2E3E4E5E6E7E8
1245 ; SSE41-NEXT: movntiq %rax, 24(%rdi)
1246 ; SSE41-NEXT: movabsq $-1591200115485380624, %rax # imm = 0xE9EAEBECEDEEEFF0
1247 ; SSE41-NEXT: movntiq %rax, 16(%rdi)
1248 ; SSE41-NEXT: movabsq $-3327364263599220776, %rax # imm = 0xD1D2D3D4D5D6D7D8
1249 ; SSE41-NEXT: movntiq %rax, 40(%rdi)
1250 ; SSE41-NEXT: movabsq $-2748642880894607392, %rax # imm = 0xD9DADBDCDDDEDFE0
1251 ; SSE41-NEXT: movntiq %rax, 32(%rdi)
1252 ; SSE41-NEXT: movabsq $-4484807029008447544, %rax # imm = 0xC1C2C3C4C5C6C7C8
1253 ; SSE41-NEXT: movntiq %rax, 56(%rdi)
1254 ; SSE41-NEXT: movabsq $-3906085646303834160, %rax # imm = 0xC9CACBCCCDCECFD0
1255 ; SSE41-NEXT: movntiq %rax, 48(%rdi)
1258 ; AVX-LABEL: test_constant_v64i8_align1:
1260 ; AVX-NEXT: movabsq $-1012478732780767240, %rax # imm = 0xF1F2F3F4F5F6F7F8
1261 ; AVX-NEXT: movntiq %rax, 8(%rdi)
1262 ; AVX-NEXT: movabsq $-433757350076154112, %rax # imm = 0xF9FAFBFCFDFEFF00
1263 ; AVX-NEXT: movntiq %rax, (%rdi)
1264 ; AVX-NEXT: movabsq $-2169921498189994008, %rax # imm = 0xE1E2E3E4E5E6E7E8
1265 ; AVX-NEXT: movntiq %rax, 24(%rdi)
1266 ; AVX-NEXT: movabsq $-1591200115485380624, %rax # imm = 0xE9EAEBECEDEEEFF0
1267 ; AVX-NEXT: movntiq %rax, 16(%rdi)
1268 ; AVX-NEXT: movabsq $-3327364263599220776, %rax # imm = 0xD1D2D3D4D5D6D7D8
1269 ; AVX-NEXT: movntiq %rax, 40(%rdi)
1270 ; AVX-NEXT: movabsq $-2748642880894607392, %rax # imm = 0xD9DADBDCDDDEDFE0
1271 ; AVX-NEXT: movntiq %rax, 32(%rdi)
1272 ; AVX-NEXT: movabsq $-4484807029008447544, %rax # imm = 0xC1C2C3C4C5C6C7C8
1273 ; AVX-NEXT: movntiq %rax, 56(%rdi)
1274 ; AVX-NEXT: movabsq $-3906085646303834160, %rax # imm = 0xC9CACBCCCDCECFD0
1275 ; AVX-NEXT: movntiq %rax, 48(%rdi)
1278 ; AVX512-LABEL: test_constant_v64i8_align1:
1280 ; AVX512-NEXT: movabsq $-1012478732780767240, %rax # imm = 0xF1F2F3F4F5F6F7F8
1281 ; AVX512-NEXT: movntiq %rax, 8(%rdi)
1282 ; AVX512-NEXT: movabsq $-433757350076154112, %rax # imm = 0xF9FAFBFCFDFEFF00
1283 ; AVX512-NEXT: movntiq %rax, (%rdi)
1284 ; AVX512-NEXT: movabsq $-2169921498189994008, %rax # imm = 0xE1E2E3E4E5E6E7E8
1285 ; AVX512-NEXT: movntiq %rax, 24(%rdi)
1286 ; AVX512-NEXT: movabsq $-1591200115485380624, %rax # imm = 0xE9EAEBECEDEEEFF0
1287 ; AVX512-NEXT: movntiq %rax, 16(%rdi)
1288 ; AVX512-NEXT: movabsq $-3327364263599220776, %rax # imm = 0xD1D2D3D4D5D6D7D8
1289 ; AVX512-NEXT: movntiq %rax, 40(%rdi)
1290 ; AVX512-NEXT: movabsq $-2748642880894607392, %rax # imm = 0xD9DADBDCDDDEDFE0
1291 ; AVX512-NEXT: movntiq %rax, 32(%rdi)
1292 ; AVX512-NEXT: movabsq $-4484807029008447544, %rax # imm = 0xC1C2C3C4C5C6C7C8
1293 ; AVX512-NEXT: movntiq %rax, 56(%rdi)
1294 ; AVX512-NEXT: movabsq $-3906085646303834160, %rax # imm = 0xC9CACBCCCDCECFD0
1295 ; AVX512-NEXT: movntiq %rax, 48(%rdi)
1297 store <64 x i8> <i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -17, i8 -18, i8 -19, i8 -20, i8 -21, i8 -22, i8 -23, i8 -24, i8 -25, i8 -26, i8 -27, i8 -28, i8 -29, i8 -30, i8 -31, i8 -32, i8 -33, i8 -34, i8 -35, i8 -36, i8 -37, i8 -38, i8 -39, i8 -40, i8 -41, i8 -42, i8 -43, i8 -44, i8 -45, i8 -46, i8 -47, i8 -48, i8 -49, i8 -50, i8 -51, i8 -52, i8 -53, i8 -54, i8 -55, i8 -56, i8 -57, i8 -58, i8 -59, i8 -60, i8 -61, i8 -62, i8 -63>, ptr %dst, align 1, !nontemporal !1
1301 define void @test_constant_v8f64_align16(ptr %dst) nounwind {
1302 ; SSE-LABEL: test_constant_v8f64_align16:
1304 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [-2.0E+0,-1.0E+0]
1305 ; SSE-NEXT: movntps %xmm0, (%rdi)
1306 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [4.0E+0,5.0E+0]
1307 ; SSE-NEXT: movntps %xmm0, 48(%rdi)
1308 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
1309 ; SSE-NEXT: movntps %xmm0, 32(%rdi)
1310 ; SSE-NEXT: xorps %xmm0, %xmm0
1311 ; SSE-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
1312 ; SSE-NEXT: movntps %xmm0, 16(%rdi)
1315 ; AVX-LABEL: test_constant_v8f64_align16:
1317 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-2.0E+0,-1.0E+0]
1318 ; AVX-NEXT: vmovntps %xmm0, (%rdi)
1319 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4.0E+0,5.0E+0]
1320 ; AVX-NEXT: vmovntps %xmm0, 48(%rdi)
1321 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
1322 ; AVX-NEXT: vmovntps %xmm0, 32(%rdi)
1323 ; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
1324 ; AVX-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
1325 ; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
1328 ; AVX512-LABEL: test_constant_v8f64_align16:
1330 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-2.0E+0,-1.0E+0]
1331 ; AVX512-NEXT: vmovntps %xmm0, (%rdi)
1332 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [4.0E+0,5.0E+0]
1333 ; AVX512-NEXT: vmovntps %xmm0, 48(%rdi)
1334 ; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
1335 ; AVX512-NEXT: vmovntps %xmm0, 32(%rdi)
1336 ; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
1337 ; AVX512-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
1338 ; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
1340 store <8 x double> <double -2.0, double -1.0, double 0.0, double 1.0, double 2.0, double 3.0, double 4.0, double 5.0>, ptr %dst, align 16, !nontemporal !1
define void @test_constant_v16f32_align16(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v16f32_align16:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.0E+0,-4.0E+0,-5.0E+0,-6.0E+0]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0.0E+0,-0.0E+0,-1.0E+0,-2.0E+0]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-1.1E+1,-1.2E+1,-1.3E+1,-1.4E+1]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-7.0E+0,-8.0E+0,-9.0E+0,-1.0E+1]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v16f32_align16:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-3.0E+0,-4.0E+0,-5.0E+0,-6.0E+0]
; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0.0E+0,-0.0E+0,-1.0E+0,-2.0E+0]
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-1.1E+1,-1.2E+1,-1.3E+1,-1.4E+1]
; AVX-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-7.0E+0,-8.0E+0,-9.0E+0,-1.0E+1]
; AVX-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v16f32_align16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-3.0E+0,-4.0E+0,-5.0E+0,-6.0E+0]
; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0.0E+0,-0.0E+0,-1.0E+0,-2.0E+0]
; AVX512-NEXT: vmovntps %xmm0, (%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-1.1E+1,-1.2E+1,-1.3E+1,-1.4E+1]
; AVX512-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-7.0E+0,-8.0E+0,-9.0E+0,-1.0E+1]
; AVX512-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX512-NEXT: retq
  store <16 x float> <float 0.0, float -0.0, float -1.0, float -2.0, float -3.0, float -4.0, float -5.0, float -6.0, float -7.0, float -8.0, float -9.0, float -10.0, float -11.0, float -12.0, float -13.0, float -14.0>, ptr %dst, align 16, !nontemporal !1
  ret void
}
define void @test_constant_v8i64_align16(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v8i64_align16:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551614,18446744073709551613]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551610,18446744073709551609]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551612,18446744073709551611]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v8i64_align16:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551614,18446744073709551613]
; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551610,18446744073709551609]
; AVX-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551612,18446744073709551611]
; AVX-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v8i64_align16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551614,18446744073709551613]
; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
; AVX512-NEXT: vmovntps %xmm0, (%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551610,18446744073709551609]
; AVX512-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551612,18446744073709551611]
; AVX512-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX512-NEXT: retq
  store <8 x i64> <i64 0, i64 -1, i64 -2, i64 -3, i64 -4, i64 -5, i64 -6, i64 -7>, ptr %dst, align 16, !nontemporal !1
  ret void
}
define void @test_constant_v16i32_align16(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v16i32_align16:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967292,4294967291,4294967290,4294967289]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,4294967295,4294967294,4294967293]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967284,4294967283,4294967282,4294967281]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967288,4294967287,4294967286,4294967285]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v16i32_align16:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967292,4294967291,4294967290,4294967289]
; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,4294967294,4294967293]
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967284,4294967283,4294967282,4294967281]
; AVX-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967288,4294967287,4294967286,4294967285]
; AVX-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v16i32_align16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [4294967292,4294967291,4294967290,4294967289]
; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,4294967294,4294967293]
; AVX512-NEXT: vmovntps %xmm0, (%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [4294967284,4294967283,4294967282,4294967281]
; AVX512-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [4294967288,4294967287,4294967286,4294967285]
; AVX512-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX512-NEXT: retq
  store <16 x i32> <i32 0, i32 -1, i32 -2, i32 -3, i32 -4, i32 -5, i32 -6, i32 -7, i32 -8, i32 -9, i32 -10, i32 -11, i32 -12, i32 -13, i32 -14, i32 -15>, ptr %dst, align 16, !nontemporal !1
  ret void
}
define void @test_constant_v32i16_align16(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v32i16_align16:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65528,65527,65526,65525,65524,65523,65522,65521]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,65535,65534,65533,65532,65531,65530,65529]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65512,65511,65510,65509,65508,65507,65506,65505]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65520,65519,65518,65517,65516,65515,65514,65513]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v32i16_align16:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65528,65527,65526,65525,65524,65523,65522,65521]
; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,65535,65534,65533,65532,65531,65530,65529]
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65512,65511,65510,65509,65508,65507,65506,65505]
; AVX-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65520,65519,65518,65517,65516,65515,65514,65513]
; AVX-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v32i16_align16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [65528,65527,65526,65525,65524,65523,65522,65521]
; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,65535,65534,65533,65532,65531,65530,65529]
; AVX512-NEXT: vmovntps %xmm0, (%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [65512,65511,65510,65509,65508,65507,65506,65505]
; AVX512-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [65520,65519,65518,65517,65516,65515,65514,65513]
; AVX512-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX512-NEXT: retq
  store <32 x i16> <i16 0, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 -17, i16 -18, i16 -19, i16 -20, i16 -21, i16 -22, i16 -23, i16 -24, i16 -25, i16 -26, i16 -27, i16 -28, i16 -29, i16 -30, i16 -31>, ptr %dst, align 16, !nontemporal !1
  ret void
}
define void @test_constant_v64i8_align16(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v64i8_align16:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [240,239,238,237,236,235,234,233,232,231,230,229,228,227,226,225]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [208,207,206,205,204,203,202,201,200,199,198,197,196,195,194,193]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [224,223,222,221,220,219,218,217,216,215,214,213,212,211,210,209]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v64i8_align16:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [240,239,238,237,236,235,234,233,232,231,230,229,228,227,226,225]
; AVX-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [208,207,206,205,204,203,202,201,200,199,198,197,196,195,194,193]
; AVX-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [224,223,222,221,220,219,218,217,216,215,214,213,212,211,210,209]
; AVX-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v64i8_align16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [240,239,238,237,236,235,234,233,232,231,230,229,228,227,226,225]
; AVX512-NEXT: vmovntps %xmm0, 16(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
; AVX512-NEXT: vmovntps %xmm0, (%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [208,207,206,205,204,203,202,201,200,199,198,197,196,195,194,193]
; AVX512-NEXT: vmovntps %xmm0, 48(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [224,223,222,221,220,219,218,217,216,215,214,213,212,211,210,209]
; AVX512-NEXT: vmovntps %xmm0, 32(%rdi)
; AVX512-NEXT: retq
  store <64 x i8> <i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -17, i8 -18, i8 -19, i8 -20, i8 -21, i8 -22, i8 -23, i8 -24, i8 -25, i8 -26, i8 -27, i8 -28, i8 -29, i8 -30, i8 -31, i8 -32, i8 -33, i8 -34, i8 -35, i8 -36, i8 -37, i8 -38, i8 -39, i8 -40, i8 -41, i8 -42, i8 -43, i8 -44, i8 -45, i8 -46, i8 -47, i8 -48, i8 -49, i8 -50, i8 -51, i8 -52, i8 -53, i8 -54, i8 -55, i8 -56, i8 -57, i8 -58, i8 -59, i8 -60, i8 -61, i8 -62, i8 -63>, ptr %dst, align 16, !nontemporal !1
  ret void
}
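; With 32-byte alignment, AVX and AVX512 targets can use full 32-byte vmovntps
; stores (followed by vzeroupper), while SSE targets still split the stores
; into 16-byte movntps chunks.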
define void @test_constant_v8f64_align32(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v8f64_align32:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4.0E+0,5.0E+0]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-2.0E+0,-1.0E+0]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v8f64_align32:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [2.0E+0,3.0E+0,4.0E+0,5.0E+0]
; AVX-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [-2.0E+0,-1.0E+0,0.0E+0,1.0E+0]
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v8f64_align32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [2.0E+0,3.0E+0,4.0E+0,5.0E+0]
; AVX512-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [-2.0E+0,-1.0E+0,0.0E+0,1.0E+0]
; AVX512-NEXT: vmovntps %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  store <8 x double> <double -2.0, double -1.0, double 0.0, double 1.0, double 2.0, double 3.0, double 4.0, double 5.0>, ptr %dst, align 32, !nontemporal !1
  ret void
}
define void @test_constant_v16f32_align32(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v16f32_align32:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-1.1E+1,-1.2E+1,-1.3E+1,-1.4E+1]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-7.0E+0,-8.0E+0,-9.0E+0,-1.0E+1]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.0E+0,-4.0E+0,-5.0E+0,-6.0E+0]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0.0E+0,-0.0E+0,-1.0E+0,-2.0E+0]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v16f32_align32:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [-7.0E+0,-8.0E+0,-9.0E+0,-1.0E+1,-1.1E+1,-1.2E+1,-1.3E+1,-1.4E+1]
; AVX-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0.0E+0,-0.0E+0,-1.0E+0,-2.0E+0,-3.0E+0,-4.0E+0,-5.0E+0,-6.0E+0]
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v16f32_align32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [-7.0E+0,-8.0E+0,-9.0E+0,-1.0E+1,-1.1E+1,-1.2E+1,-1.3E+1,-1.4E+1]
; AVX512-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [0.0E+0,-0.0E+0,-1.0E+0,-2.0E+0,-3.0E+0,-4.0E+0,-5.0E+0,-6.0E+0]
; AVX512-NEXT: vmovntps %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  store <16 x float> <float 0.0, float -0.0, float -1.0, float -2.0, float -3.0, float -4.0, float -5.0, float -6.0, float -7.0, float -8.0, float -9.0, float -10.0, float -11.0, float -12.0, float -13.0, float -14.0>, ptr %dst, align 32, !nontemporal !1
  ret void
}
define void @test_constant_v8i64_align32(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v8i64_align32:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551610,18446744073709551609]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551612,18446744073709551611]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551614,18446744073709551613]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v8i64_align32:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551612,18446744073709551611,18446744073709551610,18446744073709551609]
; AVX-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,18446744073709551615,18446744073709551614,18446744073709551613]
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v8i64_align32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551612,18446744073709551611,18446744073709551610,18446744073709551609]
; AVX512-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [0,18446744073709551615,18446744073709551614,18446744073709551613]
; AVX512-NEXT: vmovntps %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  store <8 x i64> <i64 0, i64 -1, i64 -2, i64 -3, i64 -4, i64 -5, i64 -6, i64 -7>, ptr %dst, align 32, !nontemporal !1
  ret void
}
define void @test_constant_v16i32_align32(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v16i32_align32:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967284,4294967283,4294967282,4294967281]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967288,4294967287,4294967286,4294967285]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967292,4294967291,4294967290,4294967289]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,4294967295,4294967294,4294967293]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v16i32_align32:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967288,4294967287,4294967286,4294967285,4294967284,4294967283,4294967282,4294967281]
; AVX-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,4294967294,4294967293,4294967292,4294967291,4294967290,4294967289]
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v16i32_align32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [4294967288,4294967287,4294967286,4294967285,4294967284,4294967283,4294967282,4294967281]
; AVX512-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,4294967294,4294967293,4294967292,4294967291,4294967290,4294967289]
; AVX512-NEXT: vmovntps %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  store <16 x i32> <i32 0, i32 -1, i32 -2, i32 -3, i32 -4, i32 -5, i32 -6, i32 -7, i32 -8, i32 -9, i32 -10, i32 -11, i32 -12, i32 -13, i32 -14, i32 -15>, ptr %dst, align 32, !nontemporal !1
  ret void
}
define void @test_constant_v32i16_align32(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v32i16_align32:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65512,65511,65510,65509,65508,65507,65506,65505]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65520,65519,65518,65517,65516,65515,65514,65513]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65528,65527,65526,65525,65524,65523,65522,65521]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,65535,65534,65533,65532,65531,65530,65529]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v32i16_align32:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65520,65519,65518,65517,65516,65515,65514,65513,65512,65511,65510,65509,65508,65507,65506,65505]
; AVX-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,65534,65533,65532,65531,65530,65529,65528,65527,65526,65525,65524,65523,65522,65521]
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v32i16_align32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [65520,65519,65518,65517,65516,65515,65514,65513,65512,65511,65510,65509,65508,65507,65506,65505]
; AVX512-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,65534,65533,65532,65531,65530,65529,65528,65527,65526,65525,65524,65523,65522,65521]
; AVX512-NEXT: vmovntps %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  store <32 x i16> <i16 0, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 -17, i16 -18, i16 -19, i16 -20, i16 -21, i16 -22, i16 -23, i16 -24, i16 -25, i16 -26, i16 -27, i16 -28, i16 -29, i16 -30, i16 -31>, ptr %dst, align 32, !nontemporal !1
  ret void
}
define void @test_constant_v64i8_align32(ptr %dst) nounwind {
; SSE-LABEL: test_constant_v64i8_align32:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [208,207,206,205,204,203,202,201,200,199,198,197,196,195,194,193]
; SSE-NEXT: movntps %xmm0, 48(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [224,223,222,221,220,219,218,217,216,215,214,213,212,211,210,209]
; SSE-NEXT: movntps %xmm0, 32(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [240,239,238,237,236,235,234,233,232,231,230,229,228,227,226,225]
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_constant_v64i8_align32:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [224,223,222,221,220,219,218,217,216,215,214,213,212,211,210,209,208,207,206,205,204,203,202,201,200,199,198,197,196,195,194,193]
; AVX-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241,240,239,238,237,236,235,234,233,232,231,230,229,228,227,226,225]
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_constant_v64i8_align32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [224,223,222,221,220,219,218,217,216,215,214,213,212,211,210,209,208,207,206,205,204,203,202,201,200,199,198,197,196,195,194,193]
; AVX512-NEXT: vmovntps %ymm0, 32(%rdi)
; AVX512-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241,240,239,238,237,236,235,234,233,232,231,230,229,228,227,226,225]
; AVX512-NEXT: vmovntps %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  store <64 x i8> <i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -17, i8 -18, i8 -19, i8 -20, i8 -21, i8 -22, i8 -23, i8 -24, i8 -25, i8 -26, i8 -27, i8 -28, i8 -29, i8 -30, i8 -31, i8 -32, i8 -33, i8 -34, i8 -35, i8 -36, i8 -37, i8 -38, i8 -39, i8 -40, i8 -41, i8 -42, i8 -43, i8 -44, i8 -45, i8 -46, i8 -47, i8 -48, i8 -49, i8 -50, i8 -51, i8 -52, i8 -53, i8 -54, i8 -55, i8 -56, i8 -57, i8 -58, i8 -59, i8 -60, i8 -61, i8 -62, i8 -63>, ptr %dst, align 32, !nontemporal !1