[llvm] 1034b4d - [InstCombine] lshr (mul (X, 2^N + 1)), N -> X when X is half-width (#93677)

via llvm-commits llvm-commits at lists.llvm.org
Thu May 30 00:39:35 PDT 2024


Author: AtariDreams
Date: 2024-05-30T09:39:32+02:00
New Revision: 1034b4d38dd2df542204d55b3e14e985452e4fe4

URL: https://github.com/llvm/llvm-project/commit/1034b4d38dd2df542204d55b3e14e985452e4fe4
DIFF: https://github.com/llvm/llvm-project/commit/1034b4d38dd2df542204d55b3e14e985452e4fe4.diff

LOG: [InstCombine] lshr (mul (X, 2^N + 1)), N -> X when X is half-width (#93677)

Alive2 Proof:
https://alive2.llvm.org/ce/z/Yd2CKF
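
For intuition, a small self-contained C++ check of the identity the fold relies on (illustrative only, not part of the patch; the i32 width and N = 16 are assumptions for the example): with nuw on the multiply, X * (2^N + 1) cannot wrap, so at half width X must fit in the low N bits and the lshr by N returns X unchanged.

#include <cassert>
#include <cstdint>

int main() {
  // Half-width case on i32: N = 16, multiplier 2^16 + 1 = 65537.
  // nuw on the multiply means x * 65537 does not wrap in 32 bits,
  // which forces x <= 65535, i.e. x fits in the low 16 bits.
  for (uint32_t x = 0; x <= 0xFFFF; ++x) {
    uint64_t wide = uint64_t(x) * 65537u;
    assert(wide <= UINT32_MAX && "nuw precondition holds for half-width x");
    // lshr i32 (mul nuw x, 65537), 16 --> x
    assert(((x * 65537u) >> 16) == x);
  }
  return 0;
}

The loop covers every 16-bit X, so the nuw precondition holds for all tested values.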

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
    llvm/test/Transforms/InstCombine/lshr.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 4f91993750fd2..fbc02cddfb005 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1464,10 +1464,10 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
       if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
           MulC->logBase2() == ShAmtC) {
         // Look for a "splat" mul pattern - it replicates bits across each half
-        // of a value, so a right shift is just a mask of the low bits:
-        // lshr i[2N] (mul nuw X, (2^N)+1), N --> and iN X, (2^N)-1
+        // of a value, so a right shift simplifies back to just X:
+        // lshr i[2N] (mul nuw X, (2^N)+1), N --> X
         if (ShAmtC * 2 == BitWidth)
-          return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
+          return replaceInstUsesWith(I, X);
 
         // lshr (mul nuw (X, 2^N + 1)), N -> add nuw (X, lshr(X, N))
         if (Op0->hasOneUse()) {

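The retained comment in the hunk above also covers the non-half-width case, lshr (mul nuw X, 2^N + 1), N -> add nuw (X, lshr(X, N)). A minimal standalone check of that identity (again illustrative and not part of the patch; the i32 width, N = 8, and the sampling stride are assumptions):

#include <cassert>
#include <cstdint>

int main() {
  // Non-half-width case on i32: N = 8, multiplier 2^8 + 1 = 257.
  // Only test x values for which the mul is nuw, i.e. x * 257 fits in 32 bits.
  const uint32_t Limit = UINT32_MAX / 257;
  for (uint32_t x = 0; x <= Limit; x += 9973) { // stride keeps the check quick
    // lshr i32 (mul nuw x, 257), 8 --> add nuw x, (lshr x, 8)
    assert(((x * 257u) >> 8) == x + (x >> 8));
  }
  return 0;
}
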
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index dfdb6c7b4b268..0392764490945 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -348,8 +348,7 @@ define <2 x i32> @narrow_lshr_constant(<2 x i8> %x, <2 x i8> %y) {
 
 define i32 @mul_splat_fold(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold(
-; CHECK-NEXT:    [[T:%.*]] = and i32 [[X:%.*]], 65535
-; CHECK-NEXT:    ret i32 [[T]]
+; CHECK-NEXT:    ret i32 [[X:%.*]]
 ;
   %m = mul nuw i32 %x, 65537
   %t = lshr i32 %m, 16
@@ -362,8 +361,7 @@ define <3 x i14> @mul_splat_fold_vec(<3 x i14> %x) {
 ; CHECK-LABEL: @mul_splat_fold_vec(
 ; CHECK-NEXT:    [[M:%.*]] = mul nuw <3 x i14> [[X:%.*]], <i14 129, i14 129, i14 129>
 ; CHECK-NEXT:    call void @usevec(<3 x i14> [[M]])
-; CHECK-NEXT:    [[T:%.*]] = and <3 x i14> [[X]], <i14 127, i14 127, i14 127>
-; CHECK-NEXT:    ret <3 x i14> [[T]]
+; CHECK-NEXT:    ret <3 x i14> [[X]]
 ;
   %m = mul nuw <3 x i14> %x, <i14 129, i14 129, i14 129>
   call void @usevec(<3 x i14> %m)
@@ -628,8 +626,6 @@ define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) {
   ret i32 %t
 }
 
-; Negative test (but simplifies into a different transform)
-
 define i32 @mul_splat_fold_no_nuw(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold_no_nuw(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 16
@@ -641,7 +637,7 @@ define i32 @mul_splat_fold_no_nuw(i32 %x) {
   ret i32 %t
 }
 
-; Negative test 
+; Negative test
 
 define i32 @mul_splat_fold_no_flags(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold_no_flags(


More information about the llvm-commits mailing list