; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
target datalayout = "E-p:64:64:64-p1:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
; Instcombine should be able to prove vector alignment in the
; presence of a few mild address computation tricks.
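; (Note: in @test0 below the base pointer is masked with -16, so it is 16-byte
; aligned, and the index %h is always even: %j is %i times an even %v, and %z
; is masked with -2. An even index into a double array is a multiple of 16
; bytes, so the <2 x double> store should be rewritable from align 8 to
; align 16, as the assertions below expect.)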
define void @test0(i8* %b, i64 %n, i64 %u, i64 %y) nounwind {
; CHECK-LABEL: @test0(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[C:%.*]] = ptrtoint i8* [[B:%.*]] to i64
; CHECK-NEXT: [[D:%.*]] = and i64 [[C]], -16
; CHECK-NEXT: [[E:%.*]] = inttoptr i64 [[D]] to double*
; CHECK-NEXT: [[V:%.*]] = shl i64 [[U:%.*]], 1
; CHECK-NEXT: [[Z:%.*]] = and i64 [[Y:%.*]], -2
; CHECK-NEXT: [[T1421:%.*]] = icmp eq i64 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[T1421]], label [[RETURN:%.*]], label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[BB]] ], [ 20, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[J:%.*]] = mul i64 [[I]], [[V]]
; CHECK-NEXT: [[H:%.*]] = add i64 [[J]], [[Z]]
; CHECK-NEXT: [[T8:%.*]] = getelementptr double, double* [[E]], i64 [[H]]
; CHECK-NEXT: [[P:%.*]] = bitcast double* [[T8]] to <2 x double>*
; CHECK-NEXT: store <2 x double> zeroinitializer, <2 x double>* [[P]], align 16
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[I]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
; CHECK: return:
; CHECK-NEXT: ret void
;
entry:
  %c = ptrtoint i8* %b to i64
  %d = and i64 %c, -16
  %e = inttoptr i64 %d to double*
  %v = mul i64 %u, 2
  %z = and i64 %y, -2
  %t1421 = icmp eq i64 %n, 0
  br i1 %t1421, label %return, label %bb

bb:
  %i = phi i64 [ %indvar.next, %bb ], [ 20, %entry ]
  %j = mul i64 %i, %v
  %h = add i64 %j, %z
  %t8 = getelementptr double, double* %e, i64 %h
  %p = bitcast double* %t8 to <2 x double>*
  store <2 x double><double 0.0, double 0.0>, <2 x double>* %p, align 8
  %indvar.next = add i64 %i, 1
  %exitcond = icmp eq i64 %indvar.next, %n
  br i1 %exitcond, label %return, label %bb

return:
  ret void
}
; When we see an unaligned load from an insufficiently aligned global or
; alloca, increase the alignment of the load, turning it into an aligned load.
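; (Presumably instcombine does this by raising the alignment of the global
; itself, which it may do here because the global is internal and carries no
; explicit alignment; the loads below are then rewritten from align 1 to
; align 16.)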
@GLOBAL = internal global [4 x i32] zeroinitializer

define <16 x i8> @test1(<2 x i64> %x) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 16
; CHECK-NEXT: ret <16 x i8> [[TMP]]
;
entry:
  %tmp = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
  ret <16 x i8> %tmp
}
@GLOBAL_as1 = internal addrspace(1) global [4 x i32] zeroinitializer
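; Same pattern as @test1, but through address space 1, whose pointers are
; 32 bits wide according to the datalayout (p1:32:32:32).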
define <16 x i8> @test1_as1(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1(
; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, <16 x i8> addrspace(1)* bitcast ([4 x i32] addrspace(1)* @GLOBAL_as1 to <16 x i8> addrspace(1)*), align 16
; CHECK-NEXT: ret <16 x i8> [[TMP]]
;
  %tmp = load <16 x i8>, <16 x i8> addrspace(1)* bitcast ([4 x i32] addrspace(1)* @GLOBAL_as1 to <16 x i8> addrspace(1)*), align 1
  ret <16 x i8> %tmp
}
@GLOBAL_as1_gep = internal addrspace(1) global [8 x i32] zeroinitializer
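; Here the load goes through a constant GEP to element 4 of the [8 x i32]
; global, i.e. byte offset 4*4 = 16, so a 16-byte-aligned global still yields
; a 16-byte-aligned address; the GEP constant expression is also expected to
; be canonicalized to inbounds form with i32 indices, per the assertion below.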
define <16 x i8> @test1_as1_gep(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1_gep(
; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, <16 x i8> addrspace(1)* bitcast (i32 addrspace(1)* getelementptr inbounds ([8 x i32], [8 x i32] addrspace(1)* @GLOBAL_as1_gep, i32 0, i32 4) to <16 x i8> addrspace(1)*), align 16
; CHECK-NEXT: ret <16 x i8> [[TMP]]
;
  %tmp = load <16 x i8>, <16 x i8> addrspace(1)* bitcast (i32 addrspace(1)* getelementptr ([8 x i32], [8 x i32] addrspace(1)* @GLOBAL_as1_gep, i16 0, i16 4) to <16 x i8> addrspace(1)*), align 1
  ret <16 x i8> %tmp
}
; When a load or store lacks an explicit alignment, add one.
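; (With no explicit alignment, the ABI alignment from the datalayout applies:
; f64:64:64 gives double an 8-byte ABI alignment, hence the align 8 expected
; in @test2 below.)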
define double @test2(double* %p, double %n) nounwind {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[T:%.*]] = load double, double* [[P:%.*]], align 8
; CHECK-NEXT: store double [[N:%.*]], double* [[P]], align 8
; CHECK-NEXT: ret double [[T]]
;
  %t = load double, double* %p
  store double %n, double* %p
  ret double %t
}
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind

declare void @use(i8*)

%struct.s = type { i32, i32, i32, i32 }
define void @test3(%struct.s* sret %a4) {
; Check that the alignment is bumped up to the alignment of the sret type.
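; %struct.s is four i32 fields (16 bytes) with 4-byte ABI alignment per the
; i32:32:32 datalayout entry, so the memset should be given align 4 (along
; with nonnull and dereferenceable(16)).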
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[A4_CAST:%.*]] = bitcast %struct.s* [[A4:%.*]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull align 4 dereferenceable(16) [[A4_CAST]], i8 0, i64 16, i1 false)
; CHECK-NEXT: call void @use(i8* [[A4_CAST]])
; CHECK-NEXT: ret void
;
  %a4.cast = bitcast %struct.s* %a4 to i8*
  call void @llvm.memset.p0i8.i64(i8* %a4.cast, i8 0, i64 16, i1 false)
  call void @use(i8* %a4.cast)
  ret void
}