[llvm] 04c44c9 - [X86] foldMaskAndShiftToScale - use isShiftedMask_64 directly instead of separate LZ/TZ counting logic

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 24 01:51:13 PDT 2023


Author: Simon Pilgrim
Date: 2023-08-24T09:51:05+01:00
New Revision: 04c44c9359ae1d1303316803566e0af0295b43ce

URL: https://github.com/llvm/llvm-project/commit/04c44c9359ae1d1303316803566e0af0295b43ce
DIFF: https://github.com/llvm/llvm-project/commit/04c44c9359ae1d1303316803566e0af0295b43ce.diff

LOG: [X86] foldMaskAndShiftToScale - use isShiftedMask_64 directly instead of separate LZ/TZ counting logic

I've updated foldMaskedShiftToBEXTR as well to use matching code.
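
For reference, here is a minimal standalone sketch (using C++20 <bit> rather than LLVM's MathExtras.h, with a hypothetical helper name) of what the out-param overload of llvm::isShiftedMask_64 reports, and how the leading-zero count the fold needs falls out of it:

    #include <bit>
    #include <cstdint>

    // Sketch (not LLVM's implementation): returns true iff Mask is a
    // single contiguous run of ones. On success, MaskIdx is the bit
    // index where the run starts (the trailing zero count) and MaskLen
    // is the run length (the popcount).
    static bool isShiftedMask64Sketch(uint64_t Mask, unsigned &MaskIdx,
                                      unsigned &MaskLen) {
      // Mask | (Mask - 1) fills in the low zeros; the result must then
      // be a low-bit mask (v & (v + 1) == 0) for the run to be
      // contiguous.
      uint64_t Filled = Mask | (Mask - 1);
      if (Mask == 0 || (Filled & (Filled + 1)) != 0)
        return false;
      MaskIdx = std::countr_zero(Mask);
      MaskLen = std::popcount(Mask);
      return true;
    }

    // For any contiguous mask, MaskLZ == 64 - (MaskIdx + MaskLen),
    // which is what lets the new code drop the separate
    // countl_zero/countr_zero/countr_one calls.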

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelDAGToDAG.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 1784a8103a66bb..9f057839e7dbf6 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2067,22 +2067,22 @@ static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
       !isa<ConstantSDNode>(Shift.getOperand(1)))
     return true;
 
+  // We need to ensure that mask is a continuous run of bits.
+  unsigned MaskIdx, MaskLen;
+  if (!isShiftedMask_64(Mask, MaskIdx, MaskLen))
+    return true;
+  unsigned MaskLZ = 64 - (MaskIdx + MaskLen);
+
   unsigned ShiftAmt = Shift.getConstantOperandVal(1);
-  unsigned MaskLZ = llvm::countl_zero(Mask);
-  unsigned MaskTZ = llvm::countr_zero(Mask);
 
   // The amount of shift we're trying to fit into the addressing mode is taken
-  // from the trailing zeros of the mask.
-  unsigned AMShiftAmt = MaskTZ;
+  // from the shifted mask index (number of trailing zeros of the mask).
+  unsigned AMShiftAmt = MaskIdx;
 
   // There is nothing we can do here unless the mask is removing some bits.
   // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
   if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
 
-  // We also need to ensure that mask is a continuous run of bits.
-  if (llvm::countr_one(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64)
-    return true;
-
   // Scale the leading zero count down based on the actual size of the value.
   // Also scale it down based on the size of the shift.
   unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
@@ -2163,13 +2163,15 @@ static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
     return true;
 
   // We need to ensure that mask is a continuous run of bits.
-  if (!isShiftedMask_64(Mask)) return true;
+  unsigned MaskIdx, MaskLen;
+  if (!isShiftedMask_64(Mask, MaskIdx, MaskLen))
+    return true;
 
   unsigned ShiftAmt = Shift.getConstantOperandVal(1);
 
   // The amount of shift we're trying to fit into the addressing mode is taken
-  // from the trailing zeros of the mask.
-  unsigned AMShiftAmt = llvm::countr_zero(Mask);
+  // from the shifted mask index (number of trailing zeros of the mask).
+  unsigned AMShiftAmt = MaskIdx;
 
   // There is nothing we can do here unless the mask is removing some bits.
   // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.



