; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
;
; Verify that the result of memchr calls used in equality expressions
; with either the first argument or null is optimally folded.

declare ptr @memchr(ptr, i32, i64)

@a5 = constant [5 x i8] c"12345"

; Fold memchr(a5, c, 5) == a5 to *a5 == c.
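; The length 5 is a nonzero constant, so the call always examines a5[0] ('1',
; i.e. 49), and the whole expression reduces to comparing (char)c with 49.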

define i1 @fold_memchr_a_c_5_eq_a(i32 %c) {
; CHECK-LABEL: @fold_memchr_a_c_5_eq_a(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], 49
; CHECK-NEXT:    ret i1 [[CHAR0CMP]]
;
  %q = call ptr @memchr(ptr @a5, i32 %c, i64 5)
  %cmp = icmp eq ptr %q, @a5
  ret i1 %cmp
}

; Fold memchr(a5, c, n) == a5 to n && *a5 == c.  Unlike the case where the
; first argument is an arbitrary pointer (possibly one past the end of an
; object), this is safe because a5 is known to be dereferenceable.
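; The folded form is a short-circuiting AND: the select below yields the byte
; comparison only when n is nonzero, matching memchr's behavior that a zero
; length never produces a match.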

define i1 @fold_memchr_a_c_n_eq_a(i32 %c, i64 %n) {
; CHECK-LABEL: @fold_memchr_a_c_n_eq_a(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], 49
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[N:%.*]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i1 [[CHAR0CMP]], i1 false
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %q = call ptr @memchr(ptr @a5, i32 %c, i64 %n)
  %cmp = icmp eq ptr %q, @a5
  ret i1 %cmp
}

; Do not fold memchr(a5 + i, c, n).
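; With an arbitrary index i the pointer a5 + i cannot be proven dereferenceable
; (i may point one past the end or further), so no load of its first byte is
; introduced and the call is left in place.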

define i1 @call_memchr_api_c_n_eq_a(i64 %i, i32 %c, i64 %n) {
; CHECK-LABEL: @call_memchr_api_c_n_eq_a(
; CHECK-NEXT:    [[P:%.*]] = getelementptr [5 x i8], ptr @a5, i64 0, i64 [[I:%.*]]
; CHECK-NEXT:    [[Q:%.*]] = call ptr @memchr(ptr [[P]], i32 [[C:%.*]], i64 [[N:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[Q]], [[P]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %p = getelementptr [5 x i8], ptr @a5, i64 0, i64 %i
  %q = call ptr @memchr(ptr %p, i32 %c, i64 %n)
  %cmp = icmp eq ptr %q, %p
  ret i1 %cmp
}

; Fold memchr(s, c, 15) == s to *s == c.
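; The constant nonzero length guarantees the call dereferences s, so loading
; s[0] directly is safe even though s is otherwise an unknown pointer.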

define i1 @fold_memchr_s_c_15_eq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @fold_memchr_s_c_15_eq_s(
; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[CHAR0CMP]]
;
  %p = call ptr @memchr(ptr %s, i32 %c, i64 15)
  %cmp = icmp eq ptr %p, %s
  ret i1 %cmp
}

; Fold memchr(s, c, 17) != s to *s != c.
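; Same as the previous case, only with the comparison inverted.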

define i1 @fold_memchr_s_c_17_neq_s(ptr %s, i32 %c) {
; CHECK-LABEL: @fold_memchr_s_c_17_neq_s(
; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp ne i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[CHAR0CMP]]
;
  %p = call ptr @memchr(ptr %s, i32 %c, i64 17)
  %cmp = icmp ne ptr %p, %s
  ret i1 %cmp
}

; Fold memchr(s, c, n) == s to *s == c for nonzero n.
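; The test makes the length provably nonzero by setting its low bit (the %nz
; value below), which is what permits the unconditional load of s[0].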

define i1 @fold_memchr_s_c_nz_eq_s(ptr %s, i32 %c, i64 %n) {
; CHECK-LABEL: @fold_memchr_s_c_nz_eq_s(
; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[S:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT:    [[CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[CHAR0CMP]]
;
  %nz = or i64 %n, 1
  %p = call ptr @memchr(ptr %s, i32 %c, i64 %nz)
  %cmp = icmp eq ptr %p, %s
  ret i1 %cmp
}

; But do not fold memchr(s, c, n) as above if n might be zero.  It could be
; optimized to the equivalent of n && *s == c using a short-circuiting AND;
; without that guard the load could read a byte just past the end of an array.
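; (For the constant array @a5 above the same situation is handled with a
; select, because @a5 is known to be dereferenceable even when n == 0; no such
; guarantee exists for an arbitrary pointer s.)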

define i1 @call_memchr_s_c_n_eq_s(ptr %s, i32 %c, i64 %n) {
; CHECK-LABEL: @call_memchr_s_c_n_eq_s(
; CHECK-NEXT:    [[P:%.*]] = call ptr @memchr(ptr [[S:%.*]], i32 [[C:%.*]], i64 [[N:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[P]], [[S]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %p = call ptr @memchr(ptr %s, i32 %c, i64 %n)
  %cmp = icmp eq ptr %p, %s
  ret i1 %cmp
}