[llvm-branch-commits] [llvm] 43ee392 - [InstCombine] try to fold low-mask of ashr to lshr

Tom Stellard via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Thu Apr 14 16:45:41 PDT 2022


Author: Sanjay Patel
Date: 2022-04-14T16:12:32-07:00
New Revision: 43ee392dd462f218af0fe18207e6dba347927de2

URL: https://github.com/llvm/llvm-project/commit/43ee392dd462f218af0fe18207e6dba347927de2
DIFF: https://github.com/llvm/llvm-project/commit/43ee392dd462f218af0fe18207e6dba347927de2.diff

LOG: [InstCombine] try to fold low-mask of ashr to lshr

With one-use, we handle this via demanded-bits.
But we need to handle extra uses to improve issue #54750.

https://alive2.llvm.org/ce/z/aDYkPv
(cherry picked from commit 7783db55afefd3b0d83f4d1b727b6aaa2c2286d6)
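
A minimal standalone C++ sketch of the identity behind this fold, assuming signed
right shift is arithmetic (guaranteed since C++20 and the behavior of mainstream
compilers): masking an ashr result down to its low (Width - ShiftC) bits clears
exactly the sign-extension bits, so the result matches an lshr by the same amount.

  // Sketch of the fold's identity: (ashr X, ShiftC) & low-mask == lshr X, ShiftC.
  #include <cassert>
  #include <cstdint>
  #include <initializer_list>

  int main() {
    const unsigned Width = 32;
    for (unsigned ShiftC = 1; ShiftC < Width; ++ShiftC) {
      // Low-mask with (Width - ShiftC) bits set, e.g. 0x7fffffff for ShiftC == 1.
      uint32_t LowMask = ~0u >> ShiftC;
      for (int32_t X : {0, 1, -1, 42, -42, INT32_MIN, INT32_MAX}) {
        uint32_t AShr = (uint32_t)(X >> ShiftC); // arithmetic shift: sign-extends
        uint32_t LShr = (uint32_t)X >> ShiftC;   // logical shift: zero-fills
        assert((AShr & LowMask) == LShr);
      }
    }
    return 0;
  }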

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
    llvm/test/Transforms/InstCombine/and.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 6bbb0251f2bc0..2aab79e890786 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1961,6 +1961,12 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
       }
     }
 
+    // If this 'and' clears the sign-bits added by ashr, replace with lshr:
+    // and (ashr X, ShiftC), C --> lshr X, ShiftC
+    if (match(Op0, m_AShr(m_Value(X), m_APInt(ShiftC))) && ShiftC->ult(Width) &&
+        C->isMask(Width - ShiftC->getZExtValue()))
+      return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, *ShiftC));
+
     const APInt *AddC;
     if (match(Op0, m_Add(m_Value(X), m_APInt(AddC)))) {
       // If we add zeros to every bit below a mask, the add has no effect:

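A note on the guard above (an illustrative sketch, not from the patch): C->isMask(Width - ShiftC->getZExtValue()) only accepts the all-ones value in the low (Width - ShiftC) bits, i.e. exactly the bits an lshr by ShiftC can produce. The hypothetical helper below computes that mask for a few (Width, ShiftC) pairs and lines up with the constants replaced in the tests that follow: i32 shifted by 1 needs 2147483647, <2 x i8> shifted by 7 needs 1, and i32 shifted by 20 needs 4095.

  // Hypothetical helper (illustration only): the unique low-mask accepted by
  // the guard for a Width-bit type and shift amount ShiftC (ShiftC < Width).
  #include <cstdint>
  #include <cstdio>

  static uint64_t requiredLowMask(unsigned Width, unsigned ShiftC) {
    uint64_t TypeMask = (Width == 64) ? ~0ull : ((1ull << Width) - 1);
    return TypeMask >> ShiftC; // (Width - ShiftC) low bits set
  }

  int main() {
    std::printf("%llu\n", (unsigned long long)requiredLowMask(32, 1));  // 2147483647
    std::printf("%llu\n", (unsigned long long)requiredLowMask(8, 7));   // 1
    std::printf("%llu\n", (unsigned long long)requiredLowMask(32, 20)); // 4095
  }
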
diff --git a/llvm/test/Transforms/InstCombine/and.ll b/llvm/test/Transforms/InstCombine/and.ll
index 786535fe67e43..f6585343a9290 100644
--- a/llvm/test/Transforms/InstCombine/and.ll
+++ b/llvm/test/Transforms/InstCombine/and.ll
@@ -406,7 +406,7 @@ define i32 @ashr_lowmask_use(i32 %x) {
 ; CHECK-LABEL: @ashr_lowmask_use(
 ; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X:%.*]], 1
 ; CHECK-NEXT:    call void @use32(i32 [[A]])
-; CHECK-NEXT:    [[R:%.*]] = and i32 [[A]], 2147483647
+; CHECK-NEXT:    [[R:%.*]] = lshr i32 [[X]], 1
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = ashr i32 %x, 1
@@ -419,7 +419,7 @@ define <2 x i8> @ashr_lowmask_use_splat(<2 x i8> %x, <2 x i8>* %p) {
 ; CHECK-LABEL: @ashr_lowmask_use_splat(
 ; CHECK-NEXT:    [[A:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 7, i8 7>
 ; CHECK-NEXT:    store <2 x i8> [[A]], <2 x i8>* [[P:%.*]], align 2
-; CHECK-NEXT:    [[R:%.*]] = and <2 x i8> [[A]], <i8 1, i8 1>
+; CHECK-NEXT:    [[R:%.*]] = lshr <2 x i8> [[X]], <i8 7, i8 7>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %a = ashr <2 x i8> %x, <i8 7, i8 7>
@@ -428,6 +428,8 @@ define <2 x i8> @ashr_lowmask_use_splat(<2 x i8> %x, <2 x i8>* %p) {
   ret <2 x i8> %r
 }
 
+; negative test - must keep all low bits
+
 define i32 @ashr_not_lowmask1_use(i32 %x) {
 ; CHECK-LABEL: @ashr_not_lowmask1_use(
 ; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X:%.*]], 24
@@ -441,6 +443,8 @@ define i32 @ashr_not_lowmask1_use(i32 %x) {
   ret i32 %r
 }
 
+; negative test - must keep all low bits
+
 define i32 @ashr_not_lowmask2_use(i32 %x) {
 ; CHECK-LABEL: @ashr_not_lowmask2_use(
 ; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X:%.*]], 24
@@ -454,6 +458,8 @@ define i32 @ashr_not_lowmask2_use(i32 %x) {
   ret i32 %r
 }
 
+; negative test - must keep only low bits
+
 define i32 @ashr_not_lowmask3_use(i32 %x) {
 ; CHECK-LABEL: @ashr_not_lowmask3_use(
 ; CHECK-NEXT:    [[A:%.*]] = ashr i32 [[X:%.*]], 24
@@ -1231,14 +1237,14 @@ define i32 @lowmask_sext_in_reg(i32 %x) {
   ret i32 %and
 }
 
-; Negative test - mismatched shift amounts
+; Mismatched shift amounts, but the mask op can be replaced by a shift.
 
 define i32 @lowmask_not_sext_in_reg(i32 %x) {
 ; CHECK-LABEL: @lowmask_not_sext_in_reg(
 ; CHECK-NEXT:    [[L:%.*]] = shl i32 [[X:%.*]], 19
 ; CHECK-NEXT:    [[R:%.*]] = ashr i32 [[L]], 20
 ; CHECK-NEXT:    call void @use32(i32 [[R]])
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[R]], 4095
+; CHECK-NEXT:    [[AND:%.*]] = lshr i32 [[L]], 20
 ; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %l = shl i32 %x, 19


        

