[llvm] 13deb6a - Exact ashr/lshr don't lose any set bits and are thus trivially invertible
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 5 19:22:44 PDT 2021
Author: Philip Reames
Date: 2021-04-05T19:22:36-07:00
New Revision: 13deb6aac723e5694d404f21ee136e9773eb27a6
URL: https://github.com/llvm/llvm-project/commit/13deb6aac723e5694d404f21ee136e9773eb27a6
DIFF: https://github.com/llvm/llvm-project/commit/13deb6aac723e5694d404f21ee136e9773eb27a6.diff
LOG: Exact ashr/lshr don't lose any set bits and are thus trivially invertible
Use that fact to improve isKnownNonEqual.
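To illustrate the reasoning, here is a minimal sketch in LLVM IR (the %inv computation is hypothetical and not part of this patch): an exact shift promises that only zero bits are shifted out, so the original value can be recovered with a shl by the same amount.

  %y   = lshr exact i8 %x, %s
  %inv = shl i8 %y, %s    ; %inv equals %x, since only zero bits were dropped

Because the mapping is injective, distinct inputs shifted by the same amount stay distinct when both shifts are exact, which is what the new getInvertibleOperand case reports by returning operand index 0 when both shifts are exact and share the same shift operand.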
Added:
Modified:
llvm/lib/Analysis/ValueTracking.cpp
llvm/test/Analysis/ValueTracking/known-non-equal.ll
Removed:
################################################################################
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 93c628b66575b..4732a1f468b49 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -2588,6 +2588,17 @@ static Optional<unsigned> getInvertibleOperand(const Operator *Op1,
return 0;
break;
}
+ case Instruction::AShr:
+ case Instruction::LShr: {
+ auto *PEO1 = cast<PossiblyExactOperator>(Op1);
+ auto *PEO2 = cast<PossiblyExactOperator>(Op2);
+ if (!PEO1->isExact() || !PEO2->isExact())
+ break;
+
+ if (Op1->getOperand(1) == Op2->getOperand(1))
+ return 0;
+ break;
+ }
case Instruction::SExt:
case Instruction::ZExt:
if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
diff --git a/llvm/test/Analysis/ValueTracking/known-non-equal.ll b/llvm/test/Analysis/ValueTracking/known-non-equal.ll
index 68cf010e61a8d..6a68dc6cab20b 100644
--- a/llvm/test/Analysis/ValueTracking/known-non-equal.ll
+++ b/llvm/test/Analysis/ValueTracking/known-non-equal.ll
@@ -485,6 +485,104 @@ define i1 @shl_shl_may_be_equal(i8 %A, i8 %B, i8 %shift) {
ret i1 %cmp
}
+define i1 @ashr_ashr_exact(i8 %B, i8 %shift) {
+; CHECK-LABEL: @ashr_ashr_exact(
+; CHECK-NEXT: ret i1 false
+;
+ %A = add i8 %B, 1
+ %A.op = ashr exact i8 %A, %shift
+ %B.op = ashr exact i8 %B, %shift
+ %A.op2 = mul nuw i8 %A.op, 3
+ %B.op2 = mul nuw i8 %B.op, 3
+ %cmp = icmp eq i8 %A.op2, %B.op2
+ ret i1 %cmp
+}
+
+define i1 @ashr_ashr_discard_bits(i8 %B, i8 %shift) {
+; CHECK-LABEL: @ashr_ashr_discard_bits(
+; CHECK-NEXT: [[A:%.*]] = add i8 [[B:%.*]], 1
+; CHECK-NEXT: [[A_OP:%.*]] = ashr i8 [[A]], [[SHIFT:%.*]]
+; CHECK-NEXT: [[B_OP:%.*]] = ashr exact i8 [[B]], [[SHIFT]]
+; CHECK-NEXT: [[A_OP2:%.*]] = mul nuw i8 [[A_OP]], 3
+; CHECK-NEXT: [[B_OP2:%.*]] = mul nuw i8 [[B_OP]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A_OP2]], [[B_OP2]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %A = add i8 %B, 1
+ %A.op = ashr i8 %A, %shift
+ %B.op = ashr exact i8 %B, %shift
+ %A.op2 = mul nuw i8 %A.op, 3
+ %B.op2 = mul nuw i8 %B.op, 3
+ %cmp = icmp eq i8 %A.op2, %B.op2
+ ret i1 %cmp
+}
+
+define i1 @ashr_ashr_may_be_equal(i8 %A, i8 %B, i8 %shift) {
+; CHECK-LABEL: @ashr_ashr_may_be_equal(
+; CHECK-NEXT: [[A_OP:%.*]] = ashr exact i8 [[A:%.*]], [[SHIFT:%.*]]
+; CHECK-NEXT: [[B_OP:%.*]] = ashr exact i8 [[B:%.*]], [[SHIFT]]
+; CHECK-NEXT: [[A_OP2:%.*]] = mul nuw i8 [[A_OP]], 3
+; CHECK-NEXT: [[B_OP2:%.*]] = mul nuw i8 [[B_OP]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A_OP2]], [[B_OP2]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %A.op = ashr exact i8 %A, %shift
+ %B.op = ashr exact i8 %B, %shift
+ %A.op2 = mul nuw i8 %A.op, 3
+ %B.op2 = mul nuw i8 %B.op, 3
+ %cmp = icmp eq i8 %A.op2, %B.op2
+ ret i1 %cmp
+}
+
+define i1 @lshr_lshr_exact(i8 %B, i8 %shift) {
+; CHECK-LABEL: @lshr_lshr_exact(
+; CHECK-NEXT: ret i1 false
+;
+ %A = add i8 %B, 1
+ %A.op = lshr exact i8 %A, %shift
+ %B.op = lshr exact i8 %B, %shift
+ %A.op2 = mul nuw i8 %A.op, 3
+ %B.op2 = mul nuw i8 %B.op, 3
+ %cmp = icmp eq i8 %A.op2, %B.op2
+ ret i1 %cmp
+}
+
+define i1 @lshr_lshr_discard_bits(i8 %B, i8 %shift) {
+; CHECK-LABEL: @lshr_lshr_discard_bits(
+; CHECK-NEXT: [[A:%.*]] = add i8 [[B:%.*]], 1
+; CHECK-NEXT: [[A_OP:%.*]] = lshr i8 [[A]], [[SHIFT:%.*]]
+; CHECK-NEXT: [[B_OP:%.*]] = lshr exact i8 [[B]], [[SHIFT]]
+; CHECK-NEXT: [[A_OP2:%.*]] = mul nuw i8 [[A_OP]], 3
+; CHECK-NEXT: [[B_OP2:%.*]] = mul nuw i8 [[B_OP]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A_OP2]], [[B_OP2]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %A = add i8 %B, 1
+ %A.op = lshr i8 %A, %shift
+ %B.op = lshr exact i8 %B, %shift
+ %A.op2 = mul nuw i8 %A.op, 3
+ %B.op2 = mul nuw i8 %B.op, 3
+ %cmp = icmp eq i8 %A.op2, %B.op2
+ ret i1 %cmp
+}
+
+define i1 @lshr_lshr_may_be_equal(i8 %A, i8 %B, i8 %shift) {
+; CHECK-LABEL: @lshr_lshr_may_be_equal(
+; CHECK-NEXT: [[A_OP:%.*]] = lshr exact i8 [[A:%.*]], [[SHIFT:%.*]]
+; CHECK-NEXT: [[B_OP:%.*]] = lshr exact i8 [[B:%.*]], [[SHIFT]]
+; CHECK-NEXT: [[A_OP2:%.*]] = mul nuw i8 [[A_OP]], 3
+; CHECK-NEXT: [[B_OP2:%.*]] = mul nuw i8 [[B_OP]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A_OP2]], [[B_OP2]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %A.op = lshr exact i8 %A, %shift
+ %B.op = lshr exact i8 %B, %shift
+ %A.op2 = mul nuw i8 %A.op, 3
+ %B.op2 = mul nuw i8 %B.op, 3
+ %cmp = icmp eq i8 %A.op2, %B.op2
+ ret i1 %cmp
+}
+
define i1 @recurrence_add_neq(i8 %A) {
; CHECK-LABEL: @recurrence_add_neq(
; CHECK-NEXT: entry: