; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-slow-unaligned-mem-16 | FileCheck %s --check-prefix=FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+slow-unaligned-mem-16 | FileCheck %s --check-prefix=SLOW

; Verify that the DAGCombiner creates unaligned 16-byte loads and stores
; if and only if those are fast.

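; Two i64 stores of constant zero to adjacent slots: merged into a single
; unaligned 16-byte store (xorps + movups) when that is fast, kept as two
; 8-byte movq stores when it is slow.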
define void @merge_const_vec_store(i64* %ptr) {
; FAST-LABEL: merge_const_vec_store:
; FAST:       # %bb.0:
; FAST-NEXT:    xorps %xmm0, %xmm0
; FAST-NEXT:    movups %xmm0, (%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_const_vec_store:
; SLOW:       # %bb.0:
; SLOW-NEXT:    movq $0, (%rdi)
; SLOW-NEXT:    movq $0, 8(%rdi)
; SLOW-NEXT:    retq
  %idx0 = getelementptr i64, i64* %ptr, i64 0
  %idx1 = getelementptr i64, i64* %ptr, i64 1

  store i64 0, i64* %idx0, align 8
  store i64 0, i64* %idx1, align 8

  ret void
}

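; Scalar stores of two adjacent vector elements: merged into one unaligned
; 16-byte movups when that is fast, split into movlps/movhps when it is slow.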
define void @merge_vec_element_store(<4 x double> %v, double* %ptr) {
; FAST-LABEL: merge_vec_element_store:
; FAST:       # %bb.0:
; FAST-NEXT:    movups %xmm0, (%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_vec_element_store:
; SLOW:       # %bb.0:
; SLOW-NEXT:    movlps %xmm0, (%rdi)
; SLOW-NEXT:    movhps %xmm0, 8(%rdi)
; SLOW-NEXT:    retq
  %vecext0 = extractelement <4 x double> %v, i32 0
  %vecext1 = extractelement <4 x double> %v, i32 1

  %idx0 = getelementptr double, double* %ptr, i64 0
  %idx1 = getelementptr double, double* %ptr, i64 1

  store double %vecext0, double* %idx0, align 8
  store double %vecext1, double* %idx1, align 8

  ret void
}

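; Two 8-byte loads feeding two 8-byte stores: combined into a 16-byte movups
; load/store pair when unaligned 16-byte accesses are fast, kept as four movq
; instructions when they are slow.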
define void @merge_vec_load_and_stores(i64* %ptr) {
; FAST-LABEL: merge_vec_load_and_stores:
; FAST:       # %bb.0:
; FAST-NEXT:    movups (%rdi), %xmm0
; FAST-NEXT:    movups %xmm0, 40(%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_vec_load_and_stores:
; SLOW:       # %bb.0:
; SLOW-NEXT:    movq (%rdi), %rax
; SLOW-NEXT:    movq 8(%rdi), %rcx
; SLOW-NEXT:    movq %rax, 40(%rdi)
; SLOW-NEXT:    movq %rcx, 48(%rdi)
; SLOW-NEXT:    retq
  %idx0 = getelementptr i64, i64* %ptr, i64 0
  %idx1 = getelementptr i64, i64* %ptr, i64 1

  %ld0 = load i64, i64* %idx0, align 4
  %ld1 = load i64, i64* %idx1, align 4

  %idx4 = getelementptr i64, i64* %ptr, i64 5
  %idx5 = getelementptr i64, i64* %ptr, i64 6

  store i64 %ld0, i64* %idx4, align 4
  store i64 %ld1, i64* %idx5, align 4

  ret void
}