1 ; Test EfficiencySanitizer working set instrumentation without aggressive optimization flags.
4 ; RUN: opt < %s -esan -esan-working-set -esan-assume-intra-cache-line=0 -S | FileCheck %s
6 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; aligned1: naturally-aligned 1-byte load -- always within one cache line, so
; the working-set shadow update is expected to be inlined (no runtime call).
9 define i8 @aligned1(i8* %a) {
11 %tmp1 = load i8, i8* %a, align 1
; Expected inline instrumentation: shadow byte address is computed as
; ((addr & (2^44 - 1)) + constant-offset) >> 6, i.e. one shadow byte per
; 64-byte cache line; the shadow byte is masked with 0x81 (-127) and the
; store of the updated value is skipped when those bits are already set.
13 ; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
14 ; CHECK: %0 = ptrtoint i8* %a to i64
15 ; CHECK-NEXT: %1 = and i64 %0, 17592186044415
16 ; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
17 ; CHECK-NEXT: %3 = lshr i64 %2, 6
18 ; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
19 ; CHECK-NEXT: %5 = load i8, i8* %4
20 ; CHECK-NEXT: %6 = and i8 %5, -127
21 ; CHECK-NEXT: %7 = icmp ne i8 %6, -127
22 ; CHECK-NEXT: br i1 %7, label %8, label %11
23 ; CHECK: %9 = or i8 %5, -127
24 ; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
25 ; CHECK-NEXT: store i8 %9, i8* %10
26 ; CHECK-NEXT: br label %11
27 ; CHECK: %tmp1 = load i8, i8* %a, align 1
28 ; CHECK-NEXT: ret i8 %tmp1
; aligned2: naturally-aligned 2-byte load -- intra-cache-line, so the same
; inline shadow-update sequence as aligned1 is expected.
31 define i16 @aligned2(i16* %a) {
33 %tmp1 = load i16, i16* %a, align 2
35 ; CHECK: %0 = ptrtoint i16* %a to i64
36 ; CHECK-NEXT: %1 = and i64 %0, 17592186044415
37 ; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
38 ; CHECK-NEXT: %3 = lshr i64 %2, 6
39 ; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
40 ; CHECK-NEXT: %5 = load i8, i8* %4
41 ; CHECK-NEXT: %6 = and i8 %5, -127
42 ; CHECK-NEXT: %7 = icmp ne i8 %6, -127
43 ; CHECK-NEXT: br i1 %7, label %8, label %11
44 ; CHECK: %9 = or i8 %5, -127
45 ; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
46 ; CHECK-NEXT: store i8 %9, i8* %10
47 ; CHECK-NEXT: br label %11
48 ; CHECK: %tmp1 = load i16, i16* %a, align 2
49 ; CHECK-NEXT: ret i16 %tmp1
; aligned4: naturally-aligned 4-byte load -- intra-cache-line, so the same
; inline shadow-update sequence as aligned1 is expected.
52 define i32 @aligned4(i32* %a) {
54 %tmp1 = load i32, i32* %a, align 4
56 ; CHECK: %0 = ptrtoint i32* %a to i64
57 ; CHECK-NEXT: %1 = and i64 %0, 17592186044415
58 ; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
59 ; CHECK-NEXT: %3 = lshr i64 %2, 6
60 ; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
61 ; CHECK-NEXT: %5 = load i8, i8* %4
62 ; CHECK-NEXT: %6 = and i8 %5, -127
63 ; CHECK-NEXT: %7 = icmp ne i8 %6, -127
64 ; CHECK-NEXT: br i1 %7, label %8, label %11
65 ; CHECK: %9 = or i8 %5, -127
66 ; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
67 ; CHECK-NEXT: store i8 %9, i8* %10
68 ; CHECK-NEXT: br label %11
69 ; CHECK: %tmp1 = load i32, i32* %a, align 4
70 ; CHECK-NEXT: ret i32 %tmp1
; aligned8: naturally-aligned 8-byte load -- intra-cache-line, so the same
; inline shadow-update sequence as aligned1 is expected.
73 define i64 @aligned8(i64* %a) {
75 %tmp1 = load i64, i64* %a, align 8
77 ; CHECK: %0 = ptrtoint i64* %a to i64
78 ; CHECK-NEXT: %1 = and i64 %0, 17592186044415
79 ; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
80 ; CHECK-NEXT: %3 = lshr i64 %2, 6
81 ; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
82 ; CHECK-NEXT: %5 = load i8, i8* %4
83 ; CHECK-NEXT: %6 = and i8 %5, -127
84 ; CHECK-NEXT: %7 = icmp ne i8 %6, -127
85 ; CHECK-NEXT: br i1 %7, label %8, label %11
86 ; CHECK: %9 = or i8 %5, -127
87 ; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
88 ; CHECK-NEXT: store i8 %9, i8* %10
89 ; CHECK-NEXT: br label %11
90 ; CHECK: %tmp1 = load i64, i64* %a, align 8
91 ; CHECK-NEXT: ret i64 %tmp1
; aligned16: 16-byte load with 16-byte alignment -- still cannot straddle a
; 64-byte cache line, so the inline shadow-update sequence is expected.
94 define i128 @aligned16(i128* %a) {
96 %tmp1 = load i128, i128* %a, align 16
98 ; CHECK: %0 = ptrtoint i128* %a to i64
99 ; CHECK-NEXT: %1 = and i64 %0, 17592186044415
100 ; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
101 ; CHECK-NEXT: %3 = lshr i64 %2, 6
102 ; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
103 ; CHECK-NEXT: %5 = load i8, i8* %4
104 ; CHECK-NEXT: %6 = and i8 %5, -127
105 ; CHECK-NEXT: %7 = icmp ne i8 %6, -127
106 ; CHECK-NEXT: br i1 %7, label %8, label %11
107 ; CHECK: %9 = or i8 %5, -127
108 ; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
109 ; CHECK-NEXT: store i8 %9, i8* %10
110 ; CHECK-NEXT: br label %11
111 ; CHECK: %tmp1 = load i128, i128* %a, align 16
112 ; CHECK-NEXT: ret i128 %tmp1
115 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
116 ; The loads below are not guaranteed to be intra-cache-line (alignment is
; smaller than the access size), so esan must instrument them with calls to
; the __esan_unaligned_load* runtime routines instead of inline shadow code.
; unaligned2: 2-byte load with only 1-byte alignment may cross a cache line;
; with -esan-assume-intra-cache-line=0 a runtime call is expected instead of
; the inline shadow update.
118 define i16 @unaligned2(i16* %a) {
120 %tmp1 = load i16, i16* %a, align 1
122 ; CHECK: %0 = bitcast i16* %a to i8*
123 ; CHECK-NEXT: call void @__esan_unaligned_load2(i8* %0)
124 ; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 1
125 ; CHECK-NEXT: ret i16 %tmp1
; unaligned4: 4-byte load with only 2-byte alignment may cross a cache line;
; a call to the size-4 unaligned runtime routine is expected.
128 define i32 @unaligned4(i32* %a) {
130 %tmp1 = load i32, i32* %a, align 2
132 ; CHECK: %0 = bitcast i32* %a to i8*
133 ; CHECK-NEXT: call void @__esan_unaligned_load4(i8* %0)
134 ; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 2
135 ; CHECK-NEXT: ret i32 %tmp1
; unaligned8: 8-byte load with only 4-byte alignment may cross a cache line;
; a call to the size-8 unaligned runtime routine is expected.
138 define i64 @unaligned8(i64* %a) {
140 %tmp1 = load i64, i64* %a, align 4
142 ; CHECK: %0 = bitcast i64* %a to i8*
143 ; CHECK-NEXT: call void @__esan_unaligned_load8(i8* %0)
144 ; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 4
145 ; CHECK-NEXT: ret i64 %tmp1
; unaligned16: 16-byte load with only 8-byte alignment may cross a cache
; line; a call to the size-16 unaligned runtime routine is expected.
148 define i128 @unaligned16(i128* %a) {
150 %tmp1 = load i128, i128* %a, align 8
152 ; CHECK: %0 = bitcast i128* %a to i8*
153 ; CHECK-NEXT: call void @__esan_unaligned_load16(i8* %0)
154 ; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 8
155 ; CHECK-NEXT: ret i128 %tmp1