[llvm] 3363d23 - [InstCombine] Do not simplify lshr/shl arg if it is part of a rotate pattern

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Fri Feb 16 07:55:40 PST 2024


Author: Eikansh Gupta
Date: 2024-02-16T16:52:00+01:00
New Revision: 3363d23bd39970cbd5e32bfca6892ffd97ceb023

URL: https://github.com/llvm/llvm-project/commit/3363d23bd39970cbd5e32bfca6892ffd97ceb023
DIFF: https://github.com/llvm/llvm-project/commit/3363d23bd39970cbd5e32bfca6892ffd97ceb023.diff

LOG: [InstCombine] Do not simplify lshr/shl arg if it is part of a rotate pattern

An fshl/fshr whose first two arguments are the same gets lowered to a
target-specific rotate. However, based on the demanded bits of its uses,
one of those arguments can be simplified to a different value that performs
an equivalent operation, and the rotate is then no longer recognized.

This patch prevents simplification of the lshr/shl operands when the shifts
are part of an fshl (rotate) pattern.

Closes https://github.com/llvm/llvm-project/pull/73441.
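
For context, a minimal IR sketch of the kind of input the fsh_rotate_5 test
below covers (value and function names here are illustrative, not copied
from the test file):

  define i32 @rotate_by_5(i8 %x, i32 %y) {
    ; Only the low 8 bits of %v come from %x; its top bits come from %y.
    %z  = zext i8 %x to i32
    %v  = or i32 %z, %y
    ; shl/lshr/or by 5 and 27 together rotate %v left by 5, which
    ; convertOrOfShiftsToFunnelShift recognizes as fshl(%v, %v, 5).
    %hi = shl i32 %v, 5
    %lo = lshr i32 %v, 27
    %r  = or i32 %hi, %lo
    ret i32 %r
  }

Only the top five bits of the lshr operand are demanded, and those bits of
%v come entirely from %y, so SimplifyDemandedBits would previously replace
the lshr operand with %y. The or of the shifts then matched fshl(%v, %y, 5)
instead of the rotate fshl(%v, %v, 5), as the old CHECK lines below show.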

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
    llvm/test/Transforms/InstCombine/fsh.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index be6ee9d96d2630..5f13454089e515 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -640,6 +640,19 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                                     DemandedMask, Known))
             return R;
 
+      // Do not simplify if shl is part of funnel-shift pattern
+      if (I->hasOneUse()) {
+        auto *Inst = dyn_cast<Instruction>(I->user_back());
+        if (Inst && Inst->getOpcode() == BinaryOperator::Or) {
+          if (auto Opt = convertOrOfShiftsToFunnelShift(*Inst)) {
+            auto [IID, FShiftArgs] = *Opt;
+            if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
+                FShiftArgs[0] == FShiftArgs[1])
+              return nullptr;
+          }
+        }
+      }
+
       // TODO: If we only want bits that already match the signbit then we don't
       // need to shift.
 
@@ -700,6 +713,19 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
     if (match(I->getOperand(1), m_APInt(SA))) {
       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
 
+      // Do not simplify if lshr is part of funnel-shift pattern
+      if (I->hasOneUse()) {
+        auto *Inst = dyn_cast<Instruction>(I->user_back());
+        if (Inst && Inst->getOpcode() == BinaryOperator::Or) {
+          if (auto Opt = convertOrOfShiftsToFunnelShift(*Inst)) {
+            auto [IID, FShiftArgs] = *Opt;
+            if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
+                FShiftArgs[0] == FShiftArgs[1])
+              return nullptr;
+          }
+        }
+      }
+
       // If we are just demanding the shifted sign bit and below, then this can
       // be treated as an ASHR in disguise.
       if (DemandedMask.countl_zero() >= ShiftAmt) {

diff --git a/llvm/test/Transforms/InstCombine/fsh.ll b/llvm/test/Transforms/InstCombine/fsh.ll
index 6ab47febb71405..6c85169453d15a 100644
--- a/llvm/test/Transforms/InstCombine/fsh.ll
+++ b/llvm/test/Transforms/InstCombine/fsh.ll
@@ -726,7 +726,7 @@ define i32 @fsh_rotate_5(i8 %x, i32 %y) {
 ; CHECK-LABEL: @fsh_rotate_5(
 ; CHECK-NEXT:    [[T1:%.*]] = zext i8 [[X:%.*]] to i32
 ; CHECK-NEXT:    [[OR1:%.*]] = or i32 [[T1]], [[Y:%.*]]
-; CHECK-NEXT:    [[OR2:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR1]], i32 [[Y]], i32 5)
+; CHECK-NEXT:    [[OR2:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR1]], i32 [[OR1]], i32 5)
 ; CHECK-NEXT:    ret i32 [[OR2]]
 ;
 
@@ -742,7 +742,7 @@ define i32 @fsh_rotate_18(i8 %x, i32 %y) {
 ; CHECK-LABEL: @fsh_rotate_18(
 ; CHECK-NEXT:    [[T1:%.*]] = zext i8 [[X:%.*]] to i32
 ; CHECK-NEXT:    [[OR1:%.*]] = or i32 [[T1]], [[Y:%.*]]
-; CHECK-NEXT:    [[OR2:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR1]], i32 [[Y]], i32 18)
+; CHECK-NEXT:    [[OR2:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR1]], i32 [[OR1]], i32 18)
 ; CHECK-NEXT:    ret i32 [[OR2]]
 ;
 
@@ -769,12 +769,12 @@ define i32 @fsh_load_rotate_12(ptr %data) {
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX4]], align 1
 ; CHECK-NEXT:    [[CONV5:%.*]] = zext i8 [[TMP2]] to i32
 ; CHECK-NEXT:    [[SHL6:%.*]] = shl nuw nsw i32 [[CONV5]], 8
+; CHECK-NEXT:    [[OR7:%.*]] = or disjoint i32 [[OR]], [[SHL6]]
 ; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[DATA]], i64 3
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1
 ; CHECK-NEXT:    [[CONV9:%.*]] = zext i8 [[TMP3]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = or disjoint i32 [[SHL6]], [[CONV9]]
-; CHECK-NEXT:    [[OR10:%.*]] = or disjoint i32 [[TMP4]], [[SHL3]]
-; CHECK-NEXT:    [[OR15:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR10]], i32 [[OR]], i32 12)
+; CHECK-NEXT:    [[OR10:%.*]] = or disjoint i32 [[OR7]], [[CONV9]]
+; CHECK-NEXT:    [[OR15:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR10]], i32 [[OR10]], i32 12)
 ; CHECK-NEXT:    ret i32 [[OR15]]
 ;
 
@@ -822,7 +822,7 @@ define i32 @fsh_load_rotate_25(ptr %data) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1
 ; CHECK-NEXT:    [[CONV9:%.*]] = zext i8 [[TMP3]] to i32
 ; CHECK-NEXT:    [[OR10:%.*]] = or disjoint i32 [[OR7]], [[CONV9]]
-; CHECK-NEXT:    [[OR15:%.*]] = call i32 @llvm.fshl.i32(i32 [[CONV9]], i32 [[OR10]], i32 25)
+; CHECK-NEXT:    [[OR15:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR10]], i32 [[OR10]], i32 25)
 ; CHECK-NEXT:    ret i32 [[OR15]]
 ;
 