; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mattr=sse2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=SSE
; RUN: opt < %s -mattr=avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX

; With AVX, the first 4 elements are vectorized as 256-bit vector ops, and the
; final 2 elements get vectorized using 128-bit ops, identically to what
; happens with SSE.
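;
; For reference, the scalar body below implements q[i] = p[i] + 1.0 for
; i = 0..5; a rough C equivalent (a sketch inferred from this IR, not taken
; from the original PR28457 report) would be:
;
;   void PR28457(double *restrict q, const double *restrict p) {
;     for (int i = 0; i < 6; i++)
;       q[i] = p[i] + 1.0;
;   }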

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

define void @PR28457(ptr noalias nocapture align 32 %q, ptr noalias nocapture readonly align 32 %p) {
; SSE-LABEL: @PR28457(
; SSE-NEXT:    [[P2:%.*]] = getelementptr inbounds double, ptr [[P:%.*]], i64 2
; SSE-NEXT:    [[P4:%.*]] = getelementptr inbounds double, ptr [[P]], i64 4
; SSE-NEXT:    [[Q2:%.*]] = getelementptr inbounds double, ptr [[Q:%.*]], i64 2
; SSE-NEXT:    [[Q4:%.*]] = getelementptr inbounds double, ptr [[Q]], i64 4
; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[P]], align 8
; SSE-NEXT:    [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], splat (double 1.000000e+00)
; SSE-NEXT:    store <2 x double> [[TMP3]], ptr [[Q]], align 8
; SSE-NEXT:    [[TMP6:%.*]] = load <2 x double>, ptr [[P2]], align 8
; SSE-NEXT:    [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], splat (double 1.000000e+00)
; SSE-NEXT:    store <2 x double> [[TMP7]], ptr [[Q2]], align 8
; SSE-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[P4]], align 8
; SSE-NEXT:    [[TMP11:%.*]] = fadd <2 x double> [[TMP10]], splat (double 1.000000e+00)
; SSE-NEXT:    store <2 x double> [[TMP11]], ptr [[Q4]], align 8
; SSE-NEXT:    ret void
;
; AVX-LABEL: @PR28457(
; AVX-NEXT:    [[P4:%.*]] = getelementptr inbounds double, ptr [[P:%.*]], i64 4
; AVX-NEXT:    [[Q4:%.*]] = getelementptr inbounds double, ptr [[Q:%.*]], i64 4
; AVX-NEXT:    [[TMP2:%.*]] = load <4 x double>, ptr [[P]], align 8
; AVX-NEXT:    [[TMP3:%.*]] = fadd <4 x double> [[TMP2]], splat (double 1.000000e+00)
; AVX-NEXT:    store <4 x double> [[TMP3]], ptr [[Q]], align 8
; AVX-NEXT:    [[TMP6:%.*]] = load <2 x double>, ptr [[P4]], align 8
; AVX-NEXT:    [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], splat (double 1.000000e+00)
; AVX-NEXT:    store <2 x double> [[TMP7]], ptr [[Q4]], align 8
; AVX-NEXT:    ret void
;
  %p1 = getelementptr inbounds double, ptr %p, i64 1
  %p2 = getelementptr inbounds double, ptr %p, i64 2
  %p3 = getelementptr inbounds double, ptr %p, i64 3
  %p4 = getelementptr inbounds double, ptr %p, i64 4
  %p5 = getelementptr inbounds double, ptr %p, i64 5

  %q1 = getelementptr inbounds double, ptr %q, i64 1
  %q2 = getelementptr inbounds double, ptr %q, i64 2
  %q3 = getelementptr inbounds double, ptr %q, i64 3
  %q4 = getelementptr inbounds double, ptr %q, i64 4
  %q5 = getelementptr inbounds double, ptr %q, i64 5

  %d0 = load double, ptr %p
  %d1 = load double, ptr %p1
  %d2 = load double, ptr %p2
  %d3 = load double, ptr %p3
  %d4 = load double, ptr %p4
  %d5 = load double, ptr %p5

  %a0 = fadd double %d0, 1.0
  %a1 = fadd double %d1, 1.0
  %a2 = fadd double %d2, 1.0
  %a3 = fadd double %d3, 1.0
  %a4 = fadd double %d4, 1.0
  %a5 = fadd double %d5, 1.0

  store double %a0, ptr %q
  store double %a1, ptr %q1
  store double %a2, ptr %q2
  store double %a3, ptr %q3
  store double %a4, ptr %q4
  store double %a5, ptr %q5
  ret void
}