; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -newgvn -S < %s | FileCheck %s

target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

;; Make sure we use correct bit shift based on storage size for
;; loads reusing a load value.
define i64 @test1({ i1, i8 }* %predA, { i1, i8 }* %predB) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    [[VALUELOADA_FCA_0_GEP:%.*]] = getelementptr inbounds { i1, i8 }, { i1, i8 }* [[PREDA:%.*]], i64 0, i32 0
; CHECK-NEXT:    [[VALUELOADA_FCA_0_LOAD:%.*]] = load i1, i1* [[VALUELOADA_FCA_0_GEP]], align 8
; CHECK-NEXT:    [[VALUELOADB_FCA_0_GEP:%.*]] = getelementptr inbounds { i1, i8 }, { i1, i8 }* [[PREDB:%.*]], i64 0, i32 0
; CHECK-NEXT:    [[VALUELOADB_FCA_0_LOAD:%.*]] = load i1, i1* [[VALUELOADB_FCA_0_GEP]], align 8
; CHECK-NEXT:    [[ISTRUE:%.*]] = and i1 [[VALUELOADA_FCA_0_LOAD]], [[VALUELOADB_FCA_0_LOAD]]
; CHECK-NEXT:    [[VALUELOADA_FCA_1_GEP:%.*]] = getelementptr inbounds { i1, i8 }, { i1, i8 }* [[PREDA]], i64 0, i32 1
; CHECK-NEXT:    [[VALUELOADA_FCA_1_LOAD:%.*]] = load i8, i8* [[VALUELOADA_FCA_1_GEP]], align 1
; CHECK-NEXT:    [[ISNOTNULLA:%.*]] = icmp ne i8 [[VALUELOADA_FCA_1_LOAD]], 0
; CHECK-NEXT:    [[VALUELOADB_FCA_1_GEP:%.*]] = getelementptr inbounds { i1, i8 }, { i1, i8 }* [[PREDB]], i64 0, i32 1
; CHECK-NEXT:    [[VALUELOADB_FCA_1_LOAD:%.*]] = load i8, i8* [[VALUELOADB_FCA_1_GEP]], align 1
; CHECK-NEXT:    [[ISNOTNULLB:%.*]] = icmp ne i8 [[VALUELOADB_FCA_1_LOAD]], 0
; CHECK-NEXT:    [[ISNOTNULL:%.*]] = and i1 [[ISNOTNULLA]], [[ISNOTNULLB]]
; CHECK-NEXT:    [[ISTRUEANDNOTNULL:%.*]] = and i1 [[ISTRUE]], [[ISNOTNULL]]
; CHECK-NEXT:    [[RET:%.*]] = zext i1 [[ISTRUEANDNOTNULL]] to i64
; CHECK-NEXT:    ret i64 [[RET]]
;
  %valueLoadA.fca.0.gep = getelementptr inbounds { i1, i8 }, { i1, i8 }* %predA, i64 0, i32 0
  %valueLoadA.fca.0.load = load i1, i1* %valueLoadA.fca.0.gep, align 8
  %valueLoadB.fca.0.gep = getelementptr inbounds { i1, i8 }, { i1, i8 }* %predB, i64 0, i32 0
  %valueLoadB.fca.0.load = load i1, i1* %valueLoadB.fca.0.gep, align 8
  %isTrue = and i1 %valueLoadA.fca.0.load, %valueLoadB.fca.0.load
  %valueLoadA.fca.1.gep = getelementptr inbounds { i1, i8 }, { i1, i8 }* %predA, i64 0, i32 1
  %valueLoadA.fca.1.load = load i8, i8* %valueLoadA.fca.1.gep, align 1
  %isNotNullA = icmp ne i8 %valueLoadA.fca.1.load, 0
  %valueLoadB.fca.1.gep = getelementptr inbounds { i1, i8 }, { i1, i8 }* %predB, i64 0, i32 1
  %valueLoadB.fca.1.load = load i8, i8* %valueLoadB.fca.1.gep, align 1
  %isNotNullB = icmp ne i8 %valueLoadB.fca.1.load, 0
  %isNotNull = and i1 %isNotNullA, %isNotNullB
  %isTrueAndNotNull = and i1 %isTrue, %isNotNull
  %ret = zext i1 %isTrueAndNotNull to i64
  ; Restored terminator: every LLVM function body must end in a terminator,
  ; and the autogenerated CHECK line above expects `ret i64 [[RET]]`.
  ret i64 %ret
}
;; And likewise for loads reusing a store value.
46 define i1 @test2(i8 %V, i8* %P) {
47 ; CHECK-LABEL: @test2(
48 ; CHECK-NEXT: store i8 [[V:%.*]], i8* [[P:%.*]], align 1
49 ; CHECK-NEXT: [[P2:%.*]] = bitcast i8* [[P]] to i1*
50 ; CHECK-NEXT: [[A:%.*]] = load i1, i1* [[P2]], align 1
51 ; CHECK-NEXT: ret i1 [[A]]
54 %P2 = bitcast i8* %P to i1*