[llvm] 7cbf710 - [CGP] Precommit tests for D74228.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 19 00:59:44 PST 2020


Author: Florian Hahn
Date: 2020-02-19T09:24:06+01:00
New Revision: 7cbf710396df77e854bbb2611492336c0d37b996

URL: https://github.com/llvm/llvm-project/commit/7cbf710396df77e854bbb2611492336c0d37b996
DIFF: https://github.com/llvm/llvm-project/commit/7cbf710396df77e854bbb2611492336c0d37b996.diff

LOG: [CGP] Precommit tests for D74228.
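
For reference, the xor pattern exercised by the new tests is the one InstCombine creates when it canonicalizes the usual unsigned-add overflow check: (a + b) <u a is folded to (a ^ -1) <u b, i.e. ~a <u b. A minimal before/after sketch in LLVM IR (the function names here are illustrative only, not part of the patch):

; Overflow check as typically written, before InstCombine runs:
define i1 @overflow_check_before(i64 %a, i64 %b) {
  %add = add i64 %a, %b
  %cmp = icmp ult i64 %add, %a      ; (a + b) <u a
  ret i1 %cmp
}

; Canonical form after the InstCombine fold; this is the shape the
; precommitted tests below feed into CodeGenPrepare:
define i1 @overflow_check_after(i64 %a, i64 %b) {
  %not = xor i64 %a, -1             ; ~a
  %cmp = icmp ult i64 %not, %b      ; ~a <u b
  ret i1 %cmp
}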

Added: 
    

Modified: 
    llvm/test/Transforms/CodeGenPrepare/AArch64/overflow-intrinsics.ll
    llvm/test/Transforms/CodeGenPrepare/X86/overflow-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/CodeGenPrepare/AArch64/overflow-intrinsics.ll b/llvm/test/Transforms/CodeGenPrepare/AArch64/overflow-intrinsics.ll
index a2225e495482..e1408ffdeb19 100644
--- a/llvm/test/Transforms/CodeGenPrepare/AArch64/overflow-intrinsics.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/AArch64/overflow-intrinsics.ll
@@ -98,6 +98,51 @@ define i64 @uaddo3_math_overflow_used(i64 %a, i64 %b, i64* %res) nounwind ssp {
   ret i64 %Q
 }
 
+; Instcombine folds (a + b <u a)  to (a ^ -1 <u b). Make sure we match this
+; pattern as well.
+define i64 @uaddo6_xor(i64 %a, i64 %b) {
+; CHECK-LABEL: @uaddo6_xor(
+; CHECK-NEXT:    [[X:%.*]] = xor i64 [[A:%.*]], -1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[X]], [[B:%.*]]
+; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
+; CHECK-NEXT:    ret i64 [[Q]]
+;
+  %x = xor i64 %a, -1
+  %cmp = icmp ult i64 %x, %b
+  %Q = select i1 %cmp, i64 %b, i64 42
+  ret i64 %Q
+}
+
+define i64 @uaddo6_xor_commuted(i64 %a, i64 %b) {
+; CHECK-LABEL: @uaddo6_xor_commuted(
+; CHECK-NEXT:    [[X:%.*]] = xor i64 -1, [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[X]], [[B:%.*]]
+; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
+; CHECK-NEXT:    ret i64 [[Q]]
+;
+  %x = xor i64 -1, %a
+  %cmp = icmp ult i64 %x, %b
+  %Q = select i1 %cmp, i64 %b, i64 42
+  ret i64 %Q
+}
+
+declare void @use(i64)
+
+define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
+; CHECK-LABEL: @uaddo6_xor_multi_use(
+; CHECK-NEXT:    [[X:%.*]] = xor i64 -1, [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[X]], [[B:%.*]]
+; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
+; CHECK-NEXT:    call void @use(i64 [[X]])
+; CHECK-NEXT:    ret i64 [[Q]]
+;
+  %x = xor i64 -1, %a
+  %cmp = icmp ult i64 %x, %b
+  %Q = select i1 %cmp, i64 %b, i64 42
+  call void @use(i64 %x)
+  ret i64 %Q
+}
+
 define i1 @usubo_ult_i64_overflow_used(i64 %x, i64 %y, i64* %p) {
 ; CHECK-LABEL: @usubo_ult_i64_overflow_used(
 ; CHECK-NEXT:    [[S:%.*]] = sub i64 [[X:%.*]], [[Y:%.*]]

diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/overflow-intrinsics.ll b/llvm/test/Transforms/CodeGenPrepare/X86/overflow-intrinsics.ll
index 6090a0a16199..5cf408a66100 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/overflow-intrinsics.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/overflow-intrinsics.ll
@@ -149,6 +149,51 @@ exit:
   ret i64 0
 }
 
+; Instcombine folds (a + b <u a)  to (a ^ -1 <u b). Make sure we match this
+; pattern as well.
+define i64 @uaddo6_xor(i64 %a, i64 %b) {
+; CHECK-LABEL: @uaddo6_xor(
+; CHECK-NEXT:    [[X:%.*]] = xor i64 [[A:%.*]], -1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[X]], [[B:%.*]]
+; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
+; CHECK-NEXT:    ret i64 [[Q]]
+;
+  %x = xor i64 %a, -1
+  %cmp = icmp ult i64 %x, %b
+  %Q = select i1 %cmp, i64 %b, i64 42
+  ret i64 %Q
+}
+
+define i64 @uaddo6_xor_commuted(i64 %a, i64 %b) {
+; CHECK-LABEL: @uaddo6_xor_commuted(
+; CHECK-NEXT:    [[X:%.*]] = xor i64 -1, [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[X]], [[B:%.*]]
+; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
+; CHECK-NEXT:    ret i64 [[Q]]
+;
+  %x = xor i64 -1, %a
+  %cmp = icmp ult i64 %x, %b
+  %Q = select i1 %cmp, i64 %b, i64 42
+  ret i64 %Q
+}
+
+declare void @use(i64)
+
+define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
+; CHECK-LABEL: @uaddo6_xor_multi_use(
+; CHECK-NEXT:    [[X:%.*]] = xor i64 -1, [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[X]], [[B:%.*]]
+; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
+; CHECK-NEXT:    call void @use(i64 [[X]])
+; CHECK-NEXT:    ret i64 [[Q]]
+;
+  %x = xor i64 -1, %a
+  %cmp = icmp ult i64 %x, %b
+  %Q = select i1 %cmp, i64 %b, i64 42
+  call void @use(i64 %x)
+  ret i64 %Q
+}
+
; When adding 1, the general pattern for add-overflow may be different due to icmp canonicalization.
 ; PR31754: https://bugs.llvm.org/show_bug.cgi?id=31754
 
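Note that the CHECK lines above show the IR unchanged: CodeGenPrepare does not yet recognize the xor form, which is why these tests are precommitted. Once D74228 lands and the pattern is matched, the compare/select pair is expected to be rewritten in terms of the overflow intrinsic, roughly along these lines (a hypothetical sketch only; the actual CHECK lines will be regenerated by the follow-up patch):

; Hypothetical post-D74228 shape of @uaddo6_xor, shown only to illustrate
; the intrinsic form CodeGenPrepare produces for matched overflow checks.
define i64 @uaddo6_xor_expected(i64 %a, i64 %b) {
  %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %ov = extractvalue { i64, i1 } %pair, 1
  %Q = select i1 %ov, i64 %b, i64 42
  ret i64 %Q
}

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)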
