[llvm] d8b2224 - [InstCombine] Add tests in anticipation of D128939 (NFC)

Martin Sebor via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 1 10:12:33 PDT 2022


Author: Martin Sebor
Date: 2022-07-01T11:10:00-06:00
New Revision: d8b22243c8e97181b81993e8e714539d6b73ff27

URL: https://github.com/llvm/llvm-project/commit/d8b22243c8e97181b81993e8e714539d6b73ff27
DIFF: https://github.com/llvm/llvm-project/commit/d8b22243c8e97181b81993e8e714539d6b73ff27.diff

LOG: [InstCombine] Add tests in anticipation of D128939 (NFC)

Precommit tests exercising the future folding of memchr and strchr calls
in equality expressions with the first function argument.

Added: 
    llvm/test/Transforms/InstCombine/memchr-11.ll
    llvm/test/Transforms/InstCombine/memrchr-8.ll
    llvm/test/Transforms/InstCombine/strchr-4.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/InstCombine/memchr-11.ll b/llvm/test/Transforms/InstCombine/memchr-11.ll
new file mode 100644
index 000000000000..7f67357f6449
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/memchr-11.ll
@@ -0,0 +1,117 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+;
+; Verify that the result of memchr calls used in equality expressions
+; with either the first argument or null are optimally folded.
+
+declare i8* @memchr(i8*, i32, i64)
+
+
+@a5 = constant [5 x i8] c"12345"
+
+; Fold memchr(a5, c, 5) == a5 to *a5 == c.
+
+define i1 @fold_memchr_a_c_5_eq_a(i32 %c) {
+; CHECK-LABEL: @fold_memchr_a_c_5_eq_a(
+; CHECK-NEXT:    [[Q:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0), i32 [[C:%.*]], i64 5)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[Q]], getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0)
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
+  %q = call i8* @memchr(i8* %p, i32 %c, i64 5)
+  %cmp = icmp eq i8* %q, %p
+  ret i1 %cmp
+}
+
+
+; Fold memchr(a5, c, n) == a5 to n && *a5 == c.  Unlike the case when
+; the first argument is an arbitrary, including potentially past-the-end,
+; pointer, this is safe because a5 is dereferenceable.
+
+define i1 @fold_memchr_a_c_n_eq_a(i32 %c, i64 %n) {
+; CHECK-LABEL: @fold_memchr_a_c_n_eq_a(
+; CHECK-NEXT:    [[Q:%.*]] = call i8* @memchr(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0), i32 [[C:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[Q]], getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0)
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
+  %q = call i8* @memchr(i8* %p, i32 %c, i64 %n)
+  %cmp = icmp eq i8* %q, %p
+  ret i1 %cmp
+}
+
+
+; Do not fold memchr(a5 + i, c, n).
+
+define i1 @call_memchr_api_c_n_eq_a(i64 %i, i32 %c, i64 %n) {
+; CHECK-LABEL: @call_memchr_api_c_n_eq_a(
+; CHECK-NEXT:    [[P:%.*]] = getelementptr [5 x i8], [5 x i8]* @a5, i64 0, i64 [[I:%.*]]
+; CHECK-NEXT:    [[Q:%.*]] = call i8* @memchr(i8* [[P]], i32 [[C:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[Q]], [[P]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = getelementptr [5 x i8], [5 x i8]* @a5, i64 0, i64 %i
+  %q = call i8* @memchr(i8* %p, i32 %c, i64 %n)
+  %cmp = icmp eq i8* %q, %p
+  ret i1 %cmp
+}
+
+
+; Fold memchr(s, c, 15) == s to *s == c.
+
+define i1 @fold_memchr_s_c_15_eq_s(i8* %s, i32 %c) {
+; CHECK-LABEL: @fold_memchr_s_c_15_eq_s(
+; CHECK-NEXT:    [[P:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) [[S:%.*]], i32 [[C:%.*]], i64 15)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[P]], [[S]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @memchr(i8* %s, i32 %c, i64 15)
+  %cmp = icmp eq i8* %p, %s
+  ret i1 %cmp
+}
+
+
+; Fold memchr(s, c, 17) != s to *s != c.
+
+define i1 @fold_memchr_s_c_17_neq_s(i8* %s, i32 %c) {
+; CHECK-LABEL: @fold_memchr_s_c_17_neq_s(
+; CHECK-NEXT:    [[P:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) [[S:%.*]], i32 [[C:%.*]], i64 17)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8* [[P]], [[S]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @memchr(i8* %s, i32 %c, i64 17)
+  %cmp = icmp ne i8* %p, %s
+  ret i1 %cmp
+}
+
+
+; Fold memchr(s, c, n) == s to *s == c for nonzero n.
+
+define i1 @fold_memchr_s_c_nz_eq_s(i8* %s, i32 %c, i64 %n) {
+; CHECK-LABEL: @fold_memchr_s_c_nz_eq_s(
+; CHECK-NEXT:    [[NZ:%.*]] = or i64 [[N:%.*]], 1
+; CHECK-NEXT:    [[P:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) [[S:%.*]], i32 [[C:%.*]], i64 [[NZ]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[P]], [[S]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %nz = or i64 %n, 1
+  %p = call i8* @memchr(i8* %s, i32 %c, i64 %nz)
+  %cmp = icmp eq i8* %p, %s
+  ret i1 %cmp
+}
+
+
+; But do not fold memchr(s, c, n) as above if n might be zero.  This could
+; be optimized to the equivalent of N && *S == C provided a short-circuiting
+; AND, otherwise the load could read a byte just past the end of an array.
+
+define i1 @call_memchr_s_c_n_eq_s(i8* %s, i32 %c, i64 %n) {
+; CHECK-LABEL: @call_memchr_s_c_n_eq_s(
+; CHECK-NEXT:    [[P:%.*]] = call i8* @memchr(i8* [[S:%.*]], i32 [[C:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[P]], [[S]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @memchr(i8* %s, i32 %c, i64 %n)
+  %cmp = icmp eq i8* %p, %s
+  ret i1 %cmp
+}

diff  --git a/llvm/test/Transforms/InstCombine/memrchr-8.ll b/llvm/test/Transforms/InstCombine/memrchr-8.ll
new file mode 100644
index 000000000000..094571a87219
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/memrchr-8.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+;
+; Verify that the result of memrchr calls used in equality expressions
+; with the first argument aren't folded like the corresponding calls
+; to memchr might be.
+; Folding of equality expressions with the first argument plus the bound
+; -1, i.e., memrchr(S, C, N) == S + N - 1 && S[N - 1] == C, is not implemented.
+
+declare i8* @memrchr(i8*, i32, i64)
+
+
+@a5 = constant [5 x i8] c"12345";
+
+; Do not fold memrchr(a5, c, 9) == a5.  The corresponding call to memchr
+; is folded so this test verifies that the memrchr folder doesn't make
+; the wrong assumption.  The bound of 9 tries to avoid having to adjust
+; the test if the call is folded into a series of ORs as in D128011.
+
+define i1 @call_memrchr_a_c_9_eq_a(i32 %c) {
+; CHECK-LABEL: @call_memrchr_a_c_9_eq_a(
+; CHECK-NEXT:    [[Q:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(9) getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0), i32 [[C:%.*]], i64 9)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[Q]], getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0)
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
+  %q = call i8* @memrchr(i8* %p, i32 %c, i64 9)
+  %cmp = icmp eq i8* %q, %p
+  ret i1 %cmp
+}
+
+
+; Do not fold memrchr(a5, c, n).
+
+define i1 @call_memrchr_a_c_n_eq_a(i32 %c, i64 %n) {
+; CHECK-LABEL: @call_memrchr_a_c_n_eq_a(
+; CHECK-NEXT:    [[Q:%.*]] = call i8* @memrchr(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0), i32 [[C:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[Q]], getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0)
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
+  %q = call i8* @memrchr(i8* %p, i32 %c, i64 %n)
+  %cmp = icmp eq i8* %q, %p
+  ret i1 %cmp
+}
+
+
+; Do not fold memrchr(s, c, 17).
+
+define i1 @call_memrchr_s_c_17_eq_s(i8* %s, i32 %c) {
+; CHECK-LABEL: @call_memrchr_s_c_17_eq_s(
+; CHECK-NEXT:    [[P:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(17) [[S:%.*]], i32 [[C:%.*]], i64 17)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[P]], [[S]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @memrchr(i8* %s, i32 %c, i64 17)
+  %cmp = icmp eq i8* %p, %s
+  ret i1 %cmp
+}
+
+
+; Do not fold memrchr(s, c, 7).
+
+define i1 @call_memrchr_s_c_9_neq_s(i8* %s, i32 %c) {
+; CHECK-LABEL: @call_memrchr_s_c_9_neq_s(
+; CHECK-NEXT:    [[P:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(7) [[S:%.*]], i32 [[C:%.*]], i64 7)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8* [[P]], [[S]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @memrchr(i8* %s, i32 %c, i64 7)
+  %cmp = icmp ne i8* %p, %s
+  ret i1 %cmp
+}
+
+
+; Do not fold memrchr(s, c, n).
+
+define i1 @fold_memrchr_s_c_n_eq_s(i8* %s, i32 %c, i64 %n) {
+; CHECK-LABEL: @fold_memrchr_s_c_n_eq_s(
+; CHECK-NEXT:    [[P:%.*]] = call i8* @memrchr(i8* [[S:%.*]], i32 [[C:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[P]], [[S]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @memrchr(i8* %s, i32 %c, i64 %n)
+  %cmp = icmp eq i8* %p, %s
+  ret i1 %cmp
+}

diff  --git a/llvm/test/Transforms/InstCombine/strchr-4.ll b/llvm/test/Transforms/InstCombine/strchr-4.ll
new file mode 100644
index 000000000000..aa22d3e36d16
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/strchr-4.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+;
+; Verify that the result of strchr calls used in equality expressions
+; with either the first argument or null are optimally folded.
+
+declare i8* @strchr(i8*, i32)
+
+
+; Fold strchr(s, c) == s to *s == c.
+
+define i1 @fold_strchr_s_c_eq_s(i8* %s, i32 %c) {
+; CHECK-LABEL: @fold_strchr_s_c_eq_s(
+; CHECK-NEXT:    [[P:%.*]] = call i8* @strchr(i8* noundef nonnull dereferenceable(1) [[S:%.*]], i32 [[C:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[P]], [[S]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @strchr(i8* %s, i32 %c)
+  %cmp = icmp eq i8* %p, %s
+  ret i1 %cmp
+}
+
+
+; Fold strchr(s, c) != s to *s != c.
+
+define i1 @fold_strchr_s_c_neq_s(i8* %s, i32 %c) {
+; CHECK-LABEL: @fold_strchr_s_c_neq_s(
+; CHECK-NEXT:    [[P:%.*]] = call i8* @strchr(i8* noundef nonnull dereferenceable(1) [[S:%.*]], i32 [[C:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8* [[P]], [[S]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @strchr(i8* %s, i32 %c)
+  %cmp = icmp ne i8* %p, %s
+  ret i1 %cmp
+}
+
+
+; Fold strchr(s, '\0') == null to false.  (A string must be nul-terminated,
+; otherwise the call would read past the end of the array.)
+
+define i1 @fold_strchr_s_nul_eqz(i8* %s) {
+; CHECK-LABEL: @fold_strchr_s_nul_eqz(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[S:%.*]], null
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @strchr(i8* %s, i32 0)
+  %cmp = icmp eq i8* %p, null
+  ret i1 %cmp
+}
+
+
+; Fold strchr(s, '\0') != null to true.
+
+define i1 @fold_strchr_s_nul_nez(i8* %s) {
+; CHECK-LABEL: @fold_strchr_s_nul_nez(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8* [[S:%.*]], null
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = call i8* @strchr(i8* %s, i32 0)
+  %cmp = icmp ne i8* %p, null
+  ret i1 %cmp
+}
+
+
+@a5 = constant [5 x i8] c"12345";
+
+; Fold strchr(a5, c) == a5 to *a5 == c.
+
+define i1 @fold_strchr_a_c_eq_a(i32 %c) {
+; CHECK-LABEL: @fold_strchr_a_c_eq_a(
+; CHECK-NEXT:    [[MEMCHR:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0), i32 [[C:%.*]], i64 6)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[MEMCHR]], getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 0, i64 0)
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %p = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
+  %q = call i8* @strchr(i8* %p, i32 %c)
+  %cmp = icmp eq i8* %q, %p
+  ret i1 %cmp
+}


        


More information about the llvm-commits mailing list