[llvm] aafaa2f - [SCCP] convert ashr to lshr for non-negative shift value

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 15 10:57:20 PDT 2022


Author: Sanjay Patel
Date: 2022-09-15T13:54:52-04:00
New Revision: aafaa2f4fc52a132321e02683ea0d8f7ec190490

URL: https://github.com/llvm/llvm-project/commit/aafaa2f4fc52a132321e02683ea0d8f7ec190490
DIFF: https://github.com/llvm/llvm-project/commit/aafaa2f4fc52a132321e02683ea0d8f7ec190490.diff

LOG: [SCCP] convert ashr to lshr for non-negative shift value

This is similar to the existing signed instruction folds (sext, sdiv/srem).
The obvious minimal patterns are already handled in other passes, but
doing this here avoids potentially missed folds when the multi-block
tests are converted to selects.
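
For illustration only (not part of the commit), a minimal before/after sketch
of the fold in LLVM IR; the function name @example is hypothetical:

  ; Before: %px is known non-negative because the mask clears the sign bit.
  define i8 @example(i8 %x, i8 %y) {
    %px = and i8 %x, 127
    %r = ashr i8 %px, %y
    ret i8 %r
  }

  ; After SCCP: an arithmetic shift of a non-negative value is a logical shift.
  define i8 @example(i8 %x, i8 %y) {
    %px = and i8 %x, 127
    %r = lshr i8 %px, %y
    ret i8 %r
  }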

Added: 
    

Modified: 
    llvm/lib/Transforms/Scalar/SCCP.cpp
    llvm/test/Transforms/SCCP/ashr.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/SCCP.cpp b/llvm/lib/Transforms/Scalar/SCCP.cpp
index 63a58b2d62340..422a41ea4aa91 100644
--- a/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -181,6 +181,14 @@ static bool replaceSignedInst(SCCPSolver &Solver,
     NewInst = new ZExtInst(Op0, Inst.getType(), "", &Inst);
     break;
   }
+  case Instruction::AShr: {
+    // If the shifted value is not negative, this is a logical shift right.
+    Value *Op0 = Inst.getOperand(0);
+    if (InsertedValues.count(Op0) || !isNonNegative(Op0))
+      return false;
+    NewInst = BinaryOperator::CreateLShr(Op0, Inst.getOperand(1), "", &Inst);
+    break;
+  }
   case Instruction::SDiv:
   case Instruction::SRem: {
     // If both operands are not negative, this is the same as udiv/urem.

diff --git a/llvm/test/Transforms/SCCP/ashr.ll b/llvm/test/Transforms/SCCP/ashr.ll
index a67fcefee7843..40d444d0b874b 100644
--- a/llvm/test/Transforms/SCCP/ashr.ll
+++ b/llvm/test/Transforms/SCCP/ashr.ll
@@ -4,7 +4,7 @@
 define i8 @ashr_and(i8 %x) {
 ; CHECK-LABEL: @ashr_and(
 ; CHECK-NEXT:    [[PX:%.*]] = and i8 [[X:%.*]], 127
-; CHECK-NEXT:    [[R:%.*]] = ashr i8 [[PX]], 1
+; CHECK-NEXT:    [[R:%.*]] = lshr i8 [[PX]], 1
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %px = and i8 %x, 127
@@ -14,7 +14,7 @@ define i8 @ashr_and(i8 %x) {
 
 define i8 @ashr_const(i8 %x) {
 ; CHECK-LABEL: @ashr_const(
-; CHECK-NEXT:    [[R:%.*]] = ashr i8 42, [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = lshr i8 42, [[X:%.*]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %r = ashr i8 42, %x
@@ -24,7 +24,7 @@ define i8 @ashr_const(i8 %x) {
 define i8 @ashr_zext(i7 %x, i8 %y) {
 ; CHECK-LABEL: @ashr_zext(
 ; CHECK-NEXT:    [[PX:%.*]] = zext i7 [[X:%.*]] to i8
-; CHECK-NEXT:    [[R:%.*]] = ashr i8 [[PX]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = lshr i8 [[PX]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %px = zext i7 %x to i8
@@ -32,6 +32,8 @@ define i8 @ashr_zext(i7 %x, i8 %y) {
   ret i8 %r
 }
 
+; negative test
+
 define i8 @ashr_not_nonneg(i7 %x, i8 %y) {
 ; CHECK-LABEL: @ashr_not_nonneg(
 ; CHECK-NEXT:    [[PX:%.*]] = sext i7 [[X:%.*]] to i8
@@ -51,7 +53,7 @@ define i32 @dominating_condition(i32 %x) {
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp sge i32 [[X:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[T:%.*]], label [[F:%.*]]
 ; CHECK:       t:
-; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X]], 16
+; CHECK-NEXT:    [[A:%.*]] = lshr i32 [[X]], 16
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       f:
 ; CHECK-NEXT:    br label [[EXIT]]
@@ -85,7 +87,7 @@ define i32 @dominating_condition_alt(i32 %x, i32 %y) {
 ; CHECK:       t:
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       f:
-; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[A:%.*]] = lshr i32 [[X]], [[Y:%.*]]
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ -42, [[T]] ], [ [[A]], [[F]] ]


More information about the llvm-commits mailing list