; RUN: opt < %s -passes=loop-vectorize -mattr=avx,+slow-unaligned-mem-32 -S | FileCheck %s --check-prefix=SLOWMEM32 --check-prefix=CHECK
; RUN: opt < %s -passes=loop-vectorize -mattr=avx,-slow-unaligned-mem-32 -S | FileCheck %s --check-prefix=FASTMEM32 --check-prefix=CHECK
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
; CHECK-LABEL: @read_mod_write_single_ptr(
; CHECK: load <8 x float>
; for (i = 0; i < n; ++i) a[i] *= 3.0f — a single-pointer read-modify-write
; loop over float. With AVX this should be widened to <8 x float> regardless
; of the slow-unaligned-mem-32 attribute (checked by the CHECK lines above).
define i32 @read_mod_write_single_ptr(ptr nocapture %a, i32 %n) nounwind uwtable ssp {
  %1 = icmp sgt i32 %n, 0
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
  %2 = getelementptr inbounds float, ptr %a, i64 %indvars.iv
  %3 = load float, ptr %2, align 4
  %4 = fmul float %3, 3.000000e+00
  store float %4, ptr %2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ; Return value is unused by the test; only the vectorized loop body matters.
  ret i32 undef
}
; CHECK-LABEL: @read_mod_i64(
; SLOWMEM32: load <2 x i64>
; FASTMEM32: load <4 x i64>
34 define i32 @read_mod_i64(ptr nocapture %a, i32 %n) nounwind uwtable ssp {
35 %1 = icmp sgt i32 %n, 0
36 br i1 %1, label %.lr.ph, label %._crit_edge
38 .lr.ph: ; preds = %0, %.lr.ph
39 %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
40 %2 = getelementptr inbounds i64, ptr %a, i64 %indvars.iv
41 %3 = load i64, ptr %2, align 4
43 store i64 %4, ptr %2, align 4
44 %indvars.iv.next = add i64 %indvars.iv, 1
45 %lftr.wideiv = trunc i64 %indvars.iv.next to i32
46 %exitcond = icmp eq i32 %lftr.wideiv, %n
47 br i1 %exitcond, label %._crit_edge, label %.lr.ph
49 ._crit_edge: ; preds = %.lr.ph, %0