[llvm] 7eed772 - [PatternMatch] abbreviate vector inst matchers; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Sun May 24 06:21:04 PDT 2020


Author: Sanjay Patel
Date: 2020-05-24T09:19:47-04:00
New Revision: 7eed772a279e8cd45eee70cab2cfa71f71cc90c8

URL: https://github.com/llvm/llvm-project/commit/7eed772a279e8cd45eee70cab2cfa71f71cc90c8
DIFF: https://github.com/llvm/llvm-project/commit/7eed772a279e8cd45eee70cab2cfa71f71cc90c8.diff

LOG: [PatternMatch] abbreviate vector inst matchers; NFC

Readability is not reduced with these opcodes/match lines,
so this reduces the odds of awkward wrapping from the 80-col limit.
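
For reference, a minimal sketch (not part of this patch) of how the
abbreviated matchers read at a call site; it mirrors the getSplatValue()
change in VectorUtils.cpp below, and the helper name getSplatScalar is
illustrative only:

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Return the splatted scalar if V is: shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
static Value *getSplatScalar(Value *V) {
  Value *Splat;
  if (match(V, m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                         m_Value(), m_ZeroMask())))
    return Splat;
  return nullptr;
}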

Added: 
    

Modified: 
    llvm/include/llvm/IR/PatternMatch.h
    llvm/lib/Analysis/InstructionSimplify.cpp
    llvm/lib/Analysis/VectorUtils.cpp
    llvm/lib/CodeGen/CodeGenPrepare.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/lib/Target/ARM/MVETailPredication.cpp
    llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
    llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
    llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
    llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
    llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
    llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
    llvm/lib/Transforms/Vectorize/VectorCombine.cpp
    llvm/unittests/IR/PatternMatch.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index c7ce2554ca95..feb2da000d81 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -1343,7 +1343,7 @@ inline OneOps_match<OpTy, Instruction::Freeze> m_Freeze(const OpTy &Op) {
 /// Matches InsertElementInst.
 template <typename Val_t, typename Elt_t, typename Idx_t>
 inline ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>
-m_InsertElement(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
+m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
   return ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>(
       Val, Elt, Idx);
 }
@@ -1351,7 +1351,7 @@ m_InsertElement(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
 /// Matches ExtractElementInst.
 template <typename Val_t, typename Idx_t>
 inline TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>
-m_ExtractElement(const Val_t &Val, const Idx_t &Idx) {
+m_ExtractElt(const Val_t &Val, const Idx_t &Idx) {
   return TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>(Val, Idx);
 }
 
@@ -1410,13 +1410,13 @@ struct m_SplatOrUndefMask {
 /// Matches ShuffleVectorInst independently of mask value.
 template <typename V1_t, typename V2_t>
 inline TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>
-m_ShuffleVector(const V1_t &v1, const V2_t &v2) {
+m_Shuffle(const V1_t &v1, const V2_t &v2) {
   return TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>(v1, v2);
 }
 
 template <typename V1_t, typename V2_t, typename Mask_t>
 inline Shuffle_match<V1_t, V2_t, Mask_t>
-m_ShuffleVector(const V1_t &v1, const V2_t &v2, const Mask_t &mask) {
+m_Shuffle(const V1_t &v1, const V2_t &v2, const Mask_t &mask) {
   return Shuffle_match<V1_t, V2_t, Mask_t>(v1, v2, mask);
 }
 

diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index ef7f35c90861..15f5a9c672c8 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -4310,7 +4310,7 @@ Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
   // If we are extracting a value from a vector, then inserting it into the same
   // place, that's the input vector:
   // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
-  if (match(Val, m_ExtractElement(m_Specific(Vec), m_Specific(Idx))))
+  if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
     return Vec;
 
   return nullptr;
@@ -4570,8 +4570,8 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
   // known at compile time for scalable vectors
   Constant *C;
   ConstantInt *IndexC;
-  if (!Scalable && match(Op0, m_InsertElement(m_Value(), m_Constant(C),
-                                              m_ConstantInt(IndexC)))) {
+  if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
+                                          m_ConstantInt(IndexC)))) {
     // Match a splat shuffle mask of the insert index allowing undef elements.
     int InsertIndex = IndexC->getZExtValue();
     if (all_of(Indices, [InsertIndex](int MaskElt) {

diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 2276c1fd843c..8a8bb19f3663 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -342,9 +342,9 @@ const llvm::Value *llvm::getSplatValue(const Value *V) {
 
   // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
   Value *Splat;
-  if (match(V, m_ShuffleVector(
-                   m_InsertElement(m_Value(), m_Value(Splat), m_ZeroInt()),
-                   m_Value(), m_ZeroMask())))
+  if (match(V,
+            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
+                      m_Value(), m_ZeroMask())))
     return Splat;
 
   return nullptr;

diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index e04fb2507571..d4c471afb3b6 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -6460,9 +6460,8 @@ bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
 /// in MVE takes a GPR (integer) register, and the instruction that incorporate
 /// a VDUP (such as a VADD qd, qm, rm) also require a gpr register.
 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
-  if (!match(SVI,
-             m_ShuffleVector(m_InsertElement(m_Undef(), m_Value(), m_ZeroInt()),
-                             m_Undef(), m_ZeroMask())))
+  if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
+                            m_Undef(), m_ZeroMask())))
     return false;
   Type *NewType = TLI->shouldConvertSplatType(SVI);
   if (!NewType)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6cddcf2a2fc1..856a2e4d9d67 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9358,8 +9358,8 @@ static bool areExtractShuffleVectors(Value *Op1, Value *Op2) {
 
   ArrayRef<int> M1, M2;
   Value *S1Op1, *S2Op1;
-  if (!match(Op1, m_ShuffleVector(m_Value(S1Op1), m_Undef(), m_Mask(M1))) ||
-      !match(Op2, m_ShuffleVector(m_Value(S2Op1), m_Undef(), m_Mask(M2))))
+  if (!match(Op1, m_Shuffle(m_Value(S1Op1), m_Undef(), m_Mask(M1))) ||
+      !match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2))))
     return false;
 
   // Check that the operands are half as wide as the result and we extract
@@ -9402,8 +9402,8 @@ static bool areExtractExts(Value *Ext1, Value *Ext2) {
 static bool isOperandOfVmullHighP64(Value *Op) {
   Value *VectorOperand = nullptr;
   ConstantInt *ElementIndex = nullptr;
-  return match(Op, m_ExtractElement(m_Value(VectorOperand),
-                                    m_ConstantInt(ElementIndex))) &&
+  return match(Op, m_ExtractElt(m_Value(VectorOperand),
+                                m_ConstantInt(ElementIndex))) &&
          ElementIndex->getValue() == 1 &&
          isa<FixedVectorType>(VectorOperand->getType()) &&
          cast<FixedVectorType>(VectorOperand->getType())->getNumElements() == 2;

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 76773c74e394..c5c99610dd3a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -15897,8 +15897,8 @@ bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
       Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0));
     // We are looking for a splat that can be sunk.
     if (!Shuffle ||
-        !match(Shuffle, m_ShuffleVector(
-                            m_InsertElement(m_Undef(), m_Value(), m_ZeroInt()),
+        !match(Shuffle, m_Shuffle(
+                            m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
                             m_Undef(), m_ZeroMask())))
       continue;
     if (!IsSinker(I, OpIdx.index()))

diff --git a/llvm/lib/Target/ARM/MVETailPredication.cpp b/llvm/lib/Target/ARM/MVETailPredication.cpp
index e6bf433753ff..284d278c5cab 100644
--- a/llvm/lib/Target/ARM/MVETailPredication.cpp
+++ b/llvm/lib/Target/ARM/MVETailPredication.cpp
@@ -304,12 +304,12 @@ bool MVETailPredication::isTailPredicate(TripCountPattern &TCP) {
   Instruction *Insert = nullptr;
   // The shuffle which broadcasts the index iv into a vector.
   if (!match(BroadcastSplat,
-             m_ShuffleVector(m_Instruction(Insert), m_Undef(), m_ZeroMask())))
+             m_Shuffle(m_Instruction(Insert), m_Undef(), m_ZeroMask())))
     return false;
 
   // The insert element which initialises a vector with the index iv.
   Instruction *IV = nullptr;
-  if (!match(Insert, m_InsertElement(m_Undef(), m_Instruction(IV), m_Zero())))
+  if (!match(Insert, m_InsertElt(m_Undef(), m_Instruction(IV), m_Zero())))
     return false;
 
   // The index iv.
@@ -429,13 +429,13 @@ static bool MatchElemCountLoopSetup(Loop *L, Instruction *Shuffle,
   Instruction *Insert = nullptr;
 
   if (!match(Shuffle,
-             m_ShuffleVector(m_Instruction(Insert), m_Undef(), m_ZeroMask())))
+             m_Shuffle(m_Instruction(Insert), m_Undef(), m_ZeroMask())))
     return false;
 
   // Insert the limit into a vector.
   Instruction *BECount = nullptr;
   if (!match(Insert,
-             m_InsertElement(m_Undef(), m_Instruction(BECount), m_Zero())))
+             m_InsertElt(m_Undef(), m_Instruction(BECount), m_Zero())))
     return false;
 
   // The limit calculation, backedge count.

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index fa8e9e6d9f26..44ca3613ae4d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -854,8 +854,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
   //   --->
   //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
   Value *VecOp;
-  if (match(Src,
-            m_OneUse(m_ExtractElement(m_Value(VecOp), m_ConstantInt(Cst))))) {
+  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
     auto *VecOpTy = cast<VectorType>(VecOp->getType());
     unsigned DestScalarSize = DestTy->getScalarSizeInBits();
     unsigned VecOpScalarSize = VecOpTy->getScalarSizeInBits();

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 3a9a46f8ca52..8f50358d1d3d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2826,7 +2826,7 @@ static Instruction *foldICmpBitCast(ICmpInst &Cmp,
 
   Value *Vec;
   ArrayRef<int> Mask;
-  if (match(BCSrcOp, m_ShuffleVector(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
+  if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
     // Check whether every element of Mask is the same constant
     if (is_splat(Mask)) {
       auto *VecTy = cast<VectorType>(BCSrcOp->getType());
@@ -5393,14 +5393,14 @@ static Instruction *foldVectorCmp(CmpInst &Cmp,
 
   Value *V1, *V2;
   ArrayRef<int> M;
-  if (!match(LHS, m_ShuffleVector(m_Value(V1), m_Undef(), m_Mask(M))))
+  if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M))))
     return nullptr;
 
   // If both arguments of the cmp are shuffles that use the same mask and
   // shuffle within a single vector, move the shuffle after the cmp:
   // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
   Type *V1Ty = V1->getType();
-  if (match(RHS, m_ShuffleVector(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
+  if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
       V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
     Value *NewCmp = IsFP ? Builder.CreateFCmp(Pred, V1, V2)
                          : Builder.CreateICmp(Pred, V1, V2);

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index a43a026f05e2..47297e0b4d4c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2009,7 +2009,7 @@ static Instruction *canonicalizeScalarSelectOfVecs(
 
   // We can replace a single-use extract with constant index.
   Value *Cond = Sel.getCondition();
-  if (!match(Cond, m_OneUse(m_ExtractElement(m_Value(), m_ConstantInt()))))
+  if (!match(Cond, m_OneUse(m_ExtractElt(m_Value(), m_ConstantInt()))))
     return nullptr;
 
   // select (extelt V, Index), T, F --> select (splat V, Index), T, F

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index d7c6db2051f4..430f2f4de3ac 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -58,7 +58,7 @@ static bool cheapToScalarize(Value *V, bool IsConstantExtractIndex) {
   // An insertelement to the same constant index as our extract will simplify
   // to the scalar inserted element. An insertelement to a different constant
   // index is irrelevant to our extract.
-  if (match(V, m_InsertElement(m_Value(), m_Value(), m_ConstantInt())))
+  if (match(V, m_InsertElt(m_Value(), m_Value(), m_ConstantInt())))
     return IsConstantExtractIndex;
 
   if (match(V, m_OneUse(m_Load(m_Value()))))
@@ -189,8 +189,8 @@ static Instruction *foldBitcastExtElt(ExtractElementInst &Ext,
   if (NumSrcElts < NumElts) {
     Value *Scalar;
     uint64_t InsIndexC;
-    if (!match(X, m_InsertElement(m_Value(), m_Value(Scalar),
-                                  m_ConstantInt(InsIndexC))))
+    if (!match(X, m_InsertElt(m_Value(), m_Value(Scalar),
+                              m_ConstantInt(InsIndexC))))
       return nullptr;
 
     // The extract must be from the subset of vector elements that we inserted
@@ -847,7 +847,7 @@ static Instruction *foldInsEltIntoSplat(InsertElementInst &InsElt) {
   // Check if the splat shuffle's input is the same as this insert's scalar op.
   Value *X = InsElt.getOperand(1);
   Value *Op0 = Shuf->getOperand(0);
-  if (!match(Op0, m_InsertElement(m_Undef(), m_Specific(X), m_ZeroInt())))
+  if (!match(Op0, m_InsertElt(m_Undef(), m_Specific(X), m_ZeroInt())))
     return nullptr;
 
   // Replace the shuffle mask element at the index of this insert with a zero.
@@ -885,7 +885,7 @@ static Instruction *foldInsEltIntoIdentityShuffle(InsertElementInst &InsElt) {
   // input vector.
   Value *Scalar = InsElt.getOperand(1);
   Value *X = Shuf->getOperand(0);
-  if (!match(Scalar, m_ExtractElement(m_Specific(X), m_SpecificInt(IdxC))))
+  if (!match(Scalar, m_ExtractElt(m_Specific(X), m_SpecificInt(IdxC))))
     return nullptr;
 
   // Replace the shuffle mask element at the index of this extract+insert with
@@ -1091,7 +1091,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
   if (isa<FixedVectorType>(IE.getType()) &&
       match(IdxOp, m_ConstantInt(InsertedIdx)) &&
       match(ScalarOp,
-            m_ExtractElement(m_Value(ExtVecOp), m_ConstantInt(ExtractedIdx))) &&
+            m_ExtractElt(m_Value(ExtVecOp), m_ConstantInt(ExtractedIdx))) &&
       isa<FixedVectorType>(ExtVecOp->getType()) &&
       ExtractedIdx <
           cast<FixedVectorType>(ExtVecOp->getType())->getNumElements()) {
@@ -1553,8 +1553,8 @@ static Instruction *canonicalizeInsertSplat(ShuffleVectorInst &Shuf,
   uint64_t IndexC;
 
   // Match a shuffle that is a splat to a non-zero element.
-  if (!match(Op0, m_OneUse(m_InsertElement(m_Undef(), m_Value(X),
-                                           m_ConstantInt(IndexC)))) ||
+  if (!match(Op0, m_OneUse(m_InsertElt(m_Undef(), m_Value(X),
+                                       m_ConstantInt(IndexC)))) ||
       !match(Op1, m_Undef()) || match(Mask, m_ZeroMask()) || IndexC == 0)
     return nullptr;
 
@@ -1766,7 +1766,7 @@ static Instruction *narrowVectorSelect(ShuffleVectorInst &Shuf,
   // and have the same number of elements as this shuffle.
   unsigned NarrowNumElts = Shuf.getType()->getNumElements();
   Value *NarrowCond;
-  if (!match(Cond, m_OneUse(m_ShuffleVector(m_Value(NarrowCond), m_Undef()))) ||
+  if (!match(Cond, m_OneUse(m_Shuffle(m_Value(NarrowCond), m_Undef()))) ||
       cast<VectorType>(NarrowCond->getType())->getNumElements() !=
           NarrowNumElts ||
       !cast<ShuffleVectorInst>(Cond)->isIdentityWithPadding())
@@ -1788,7 +1788,7 @@ static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) {
 
   Value *X, *Y;
   ArrayRef<int> Mask;
-  if (!match(Op0, m_ShuffleVector(m_Value(X), m_Value(Y), m_Mask(Mask))))
+  if (!match(Op0, m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask))))
     return nullptr;
 
   // Be conservative with shuffle transforms. If we can't kill the 1st shuffle,
@@ -1842,12 +1842,12 @@ static Instruction *foldShuffleWithInsert(ShuffleVectorInst &Shuf,
   // operand with the source vector of the insertelement.
   Value *X;
   uint64_t IdxC;
-  if (match(V0, m_InsertElement(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
+  if (match(V0, m_InsertElt(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
     // shuf (inselt X, ?, IdxC), ?, Mask --> shuf X, ?, Mask
     if (none_of(Mask, [IdxC](int MaskElt) { return MaskElt == (int)IdxC; }))
       return IC.replaceOperand(Shuf, 0, X);
   }
-  if (match(V1, m_InsertElement(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
+  if (match(V1, m_InsertElt(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
     // Offset the index constant by the vector width because we are checking for
     // accesses to the 2nd vector input of the shuffle.
     IdxC += NumElts;
@@ -1859,8 +1859,8 @@ static Instruction *foldShuffleWithInsert(ShuffleVectorInst &Shuf,
   // shuffle (insert ?, Scalar, IndexC), V1, Mask --> insert V1, Scalar, IndexC'
   auto isShufflingScalarIntoOp1 = [&](Value *&Scalar, ConstantInt *&IndexC) {
     // We need an insertelement with a constant index.
-    if (!match(V0, m_InsertElement(m_Value(), m_Value(Scalar),
-                                   m_ConstantInt(IndexC))))
+    if (!match(V0, m_InsertElt(m_Value(), m_Value(Scalar),
+                               m_ConstantInt(IndexC))))
       return false;
 
     // Test the shuffle mask to see if it splices the inserted scalar into the

diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 14076ab78e8e..3b405ee6f86e 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1445,9 +1445,8 @@ Instruction *InstCombiner::foldVectorBinop(BinaryOperator &Inst) {
   // of the results.
   Value *L0, *L1, *R0, *R1;
   ArrayRef<int> Mask;
-  if (match(LHS, m_ShuffleVector(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
-      match(RHS,
-            m_ShuffleVector(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
+  if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
+      match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
       LHS->hasOneUse() && RHS->hasOneUse() &&
       cast<ShuffleVectorInst>(LHS)->isConcat() &&
       cast<ShuffleVectorInst>(RHS)->isConcat()) {
@@ -1481,9 +1480,8 @@ Instruction *InstCombiner::foldVectorBinop(BinaryOperator &Inst) {
   // If both arguments of the binary operation are shuffles that use the same
   // mask and shuffle within a single vector, move the shuffle after the binop.
   Value *V1, *V2;
-  if (match(LHS, m_ShuffleVector(m_Value(V1), m_Undef(), m_Mask(Mask))) &&
-      match(RHS,
-            m_ShuffleVector(m_Value(V2), m_Undef(), m_SpecificMask(Mask))) &&
+  if (match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))) &&
+      match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(Mask))) &&
       V1->getType() == V2->getType() &&
       (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
     // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
@@ -1493,9 +1491,9 @@ Instruction *InstCombiner::foldVectorBinop(BinaryOperator &Inst) {
   // If both arguments of a commutative binop are select-shuffles that use the
   // same mask with commuted operands, the shuffles are unnecessary.
   if (Inst.isCommutative() &&
-      match(LHS, m_ShuffleVector(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
-      match(RHS, m_ShuffleVector(m_Specific(V2), m_Specific(V1),
-                                 m_SpecificMask(Mask)))) {
+      match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
+      match(RHS,
+            m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
     auto *LShuf = cast<ShuffleVectorInst>(LHS);
     auto *RShuf = cast<ShuffleVectorInst>(RHS);
     // TODO: Allow shuffles that contain undefs in the mask?
@@ -1523,9 +1521,9 @@ Instruction *InstCombiner::foldVectorBinop(BinaryOperator &Inst) {
   // transforms.
   unsigned NumElts = cast<FixedVectorType>(Inst.getType())->getNumElements();
   Constant *C;
-  if (match(&Inst, m_c_BinOp(m_OneUse(m_ShuffleVector(m_Value(V1), m_Undef(),
-                                                      m_Mask(Mask))),
-                             m_Constant(C))) &&
+  if (match(&Inst,
+            m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))),
+                      m_Constant(C))) &&
       cast<FixedVectorType>(V1->getType())->getNumElements() <= NumElts) {
     assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
            "Shuffle should not change scalar type");
@@ -1605,8 +1603,8 @@ Instruction *InstCombiner::foldVectorBinop(BinaryOperator &Inst) {
     ArrayRef<int> MaskC;
     int SplatIndex;
     BinaryOperator *BO;
-    if (!match(LHS, m_OneUse(m_ShuffleVector(m_Value(X), m_Undef(),
-                                             m_Mask(MaskC)))) ||
+    if (!match(LHS,
+               m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
         !match(MaskC, m_SplatOrUndefMask(SplatIndex)) ||
         X->getType() != Inst.getType() || !match(RHS, m_OneUse(m_BinOp(BO))) ||
         BO->getOpcode() != Opcode)

diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 5627fa42dfc6..1657b9e90115 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -843,8 +843,8 @@ class BoUpSLP {
       // the extracts could be optimized away.
       Value *EV;
       ConstantInt *Ex1Idx, *Ex2Idx;
-      if (match(V1, m_ExtractElement(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
-          match(V2, m_ExtractElement(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
+      if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
+          match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
           Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
         return VLOperands::ScoreConsecutiveExtracts;
 

diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 8001b076e74c..c08aac080931 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -211,8 +211,8 @@ static bool foldExtractExtract(Instruction &I, const TargetTransformInfo &TTI) {
 
   Value *V0, *V1;
   uint64_t C0, C1;
-  if (!match(Ext0, m_ExtractElement(m_Value(V0), m_ConstantInt(C0))) ||
-      !match(Ext1, m_ExtractElement(m_Value(V1), m_ConstantInt(C1))) ||
+  if (!match(Ext0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
+      !match(Ext1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
       V0->getType() != V1->getType())
     return false;
 
@@ -223,8 +223,8 @@ static bool foldExtractExtract(Instruction &I, const TargetTransformInfo &TTI) {
   //       probably becomes unnecessary.
   uint64_t InsertIndex = std::numeric_limits<uint64_t>::max();
   if (I.hasOneUse())
-    match(I.user_back(), m_InsertElement(m_Value(), m_Value(),
-                                         m_ConstantInt(InsertIndex)));
+    match(I.user_back(),
+          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));
 
   Instruction *ConvertToShuffle;
   if (isExtractExtractCheap(Ext0, Ext1, I.getOpcode(), TTI, ConvertToShuffle,
@@ -266,8 +266,8 @@ static bool foldExtractExtract(Instruction &I, const TargetTransformInfo &TTI) {
 static bool foldBitcastShuf(Instruction &I, const TargetTransformInfo &TTI) {
   Value *V;
   ArrayRef<int> Mask;
-  if (!match(&I, m_BitCast(m_OneUse(m_ShuffleVector(m_Value(V), m_Undef(),
-                                                    m_Mask(Mask))))))
+  if (!match(&I, m_BitCast(
+                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
     return false;
 
   // Disallow non-vector casts and length-changing shuffles.
@@ -303,8 +303,8 @@ static bool foldBitcastShuf(Instruction &I, const TargetTransformInfo &TTI) {
   // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
   IRBuilder<> Builder(&I);
   Value *CastV = Builder.CreateBitCast(V, DestTy);
-  Value *Shuf = Builder.CreateShuffleVector(CastV, UndefValue::get(DestTy),
-                                            NewMask);
+  Value *Shuf =
+      Builder.CreateShuffleVector(CastV, UndefValue::get(DestTy), NewMask);
   I.replaceAllUsesWith(Shuf);
   return true;
 }
@@ -320,10 +320,10 @@ static bool scalarizeBinop(Instruction &I, const TargetTransformInfo &TTI) {
   Constant *VecC0, *VecC1;
   Value *V0, *V1;
   uint64_t Index;
-  if (!match(Ins0, m_InsertElement(m_Constant(VecC0), m_Value(V0),
-                                   m_ConstantInt(Index))) ||
-      !match(Ins1, m_InsertElement(m_Constant(VecC1), m_Value(V1),
-                                   m_SpecificInt(Index))))
+  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
+                               m_ConstantInt(Index))) ||
+      !match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
+                               m_SpecificInt(Index))))
     return false;
 
   Type *ScalarTy = V0->getType();

diff --git a/llvm/unittests/IR/PatternMatch.cpp b/llvm/unittests/IR/PatternMatch.cpp
index 186746bca791..2a47e24f11d1 100644
--- a/llvm/unittests/IR/PatternMatch.cpp
+++ b/llvm/unittests/IR/PatternMatch.cpp
@@ -962,39 +962,38 @@ TEST_F(PatternMatchTest, VectorOps) {
   Value *A = nullptr, *B = nullptr, *C = nullptr;
 
   // Test matching insertelement
-  EXPECT_TRUE(match(VI1, m_InsertElement(m_Value(), m_Value(), m_Value())));
+  EXPECT_TRUE(match(VI1, m_InsertElt(m_Value(), m_Value(), m_Value())));
   EXPECT_TRUE(
-      match(VI1, m_InsertElement(m_Undef(), m_ConstantInt(), m_ConstantInt())));
+      match(VI1, m_InsertElt(m_Undef(), m_ConstantInt(), m_ConstantInt())));
   EXPECT_TRUE(
-      match(VI1, m_InsertElement(m_Undef(), m_ConstantInt(), m_Zero())));
+      match(VI1, m_InsertElt(m_Undef(), m_ConstantInt(), m_Zero())));
   EXPECT_TRUE(
-      match(VI1, m_InsertElement(m_Undef(), m_SpecificInt(1), m_Zero())));
-  EXPECT_TRUE(match(VI2, m_InsertElement(m_Value(), m_Value(), m_Value())));
+      match(VI1, m_InsertElt(m_Undef(), m_SpecificInt(1), m_Zero())));
+  EXPECT_TRUE(match(VI2, m_InsertElt(m_Value(), m_Value(), m_Value())));
   EXPECT_FALSE(
-      match(VI2, m_InsertElement(m_Value(), m_Value(), m_ConstantInt())));
+      match(VI2, m_InsertElt(m_Value(), m_Value(), m_ConstantInt())));
   EXPECT_FALSE(
-      match(VI2, m_InsertElement(m_Value(), m_ConstantInt(), m_Value())));
-  EXPECT_FALSE(match(VI2, m_InsertElement(m_Constant(), m_Value(), m_Value())));
-  EXPECT_TRUE(match(VI3, m_InsertElement(m_Value(A), m_Value(B), m_Value(C))));
+      match(VI2, m_InsertElt(m_Value(), m_ConstantInt(), m_Value())));
+  EXPECT_FALSE(match(VI2, m_InsertElt(m_Constant(), m_Value(), m_Value())));
+  EXPECT_TRUE(match(VI3, m_InsertElt(m_Value(A), m_Value(B), m_Value(C))));
   EXPECT_TRUE(A == VI1);
   EXPECT_TRUE(B == Val2);
   EXPECT_TRUE(isa<ConstantInt>(C));
   A = B = C = nullptr; // reset
 
   // Test matching extractelement
-  EXPECT_TRUE(match(EX1, m_ExtractElement(m_Value(A), m_Value(B))));
+  EXPECT_TRUE(match(EX1, m_ExtractElt(m_Value(A), m_Value(B))));
   EXPECT_TRUE(A == VI4);
   EXPECT_TRUE(B == Val);
   A = B = C = nullptr; // reset
-  EXPECT_FALSE(match(EX1, m_ExtractElement(m_Value(), m_ConstantInt())));
-  EXPECT_TRUE(match(EX2, m_ExtractElement(m_Value(), m_ConstantInt())));
-  EXPECT_TRUE(match(EX3, m_ExtractElement(m_Constant(), m_ConstantInt())));
+  EXPECT_FALSE(match(EX1, m_ExtractElt(m_Value(), m_ConstantInt())));
+  EXPECT_TRUE(match(EX2, m_ExtractElt(m_Value(), m_ConstantInt())));
+  EXPECT_TRUE(match(EX3, m_ExtractElt(m_Constant(), m_ConstantInt())));
 
   // Test matching shufflevector
   ArrayRef<int> Mask;
-  EXPECT_TRUE(match(SI1, m_ShuffleVector(m_Value(), m_Undef(), m_ZeroMask())));
-  EXPECT_TRUE(
-      match(SI2, m_ShuffleVector(m_Value(A), m_Value(B), m_Mask(Mask))));
+  EXPECT_TRUE(match(SI1, m_Shuffle(m_Value(), m_Undef(), m_ZeroMask())));
+  EXPECT_TRUE(match(SI2, m_Shuffle(m_Value(A), m_Value(B), m_Mask(Mask))));
   EXPECT_TRUE(A == VI3);
   EXPECT_TRUE(B == VI4);
   A = B = C = nullptr; // reset
@@ -1002,21 +1001,21 @@ TEST_F(PatternMatchTest, VectorOps) {
   // Test matching the vector splat pattern
   EXPECT_TRUE(match(
       SI1,
-      m_ShuffleVector(m_InsertElement(m_Undef(), m_SpecificInt(1), m_Zero()),
-                      m_Undef(), m_ZeroMask())));
+      m_Shuffle(m_InsertElt(m_Undef(), m_SpecificInt(1), m_Zero()),
+                m_Undef(), m_ZeroMask())));
   EXPECT_FALSE(match(
-      SI3, m_ShuffleVector(m_InsertElement(m_Undef(), m_Value(), m_Zero()),
-                           m_Undef(), m_ZeroMask())));
+      SI3, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_Zero()),
+                     m_Undef(), m_ZeroMask())));
   EXPECT_FALSE(match(
-      SI4, m_ShuffleVector(m_InsertElement(m_Undef(), m_Value(), m_Zero()),
-                           m_Undef(), m_ZeroMask())));
+      SI4, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_Zero()),
+                     m_Undef(), m_ZeroMask())));
   EXPECT_TRUE(match(
       SP1,
-      m_ShuffleVector(m_InsertElement(m_Undef(), m_SpecificInt(2), m_Zero()),
-                      m_Undef(), m_ZeroMask())));
+      m_Shuffle(m_InsertElt(m_Undef(), m_SpecificInt(2), m_Zero()),
+                m_Undef(), m_ZeroMask())));
   EXPECT_TRUE(match(
-      SP2, m_ShuffleVector(m_InsertElement(m_Undef(), m_Value(A), m_Zero()),
-                           m_Undef(), m_ZeroMask())));
+      SP2, m_Shuffle(m_InsertElt(m_Undef(), m_Value(A), m_Zero()),
+                     m_Undef(), m_ZeroMask())));
   EXPECT_TRUE(A == Val);
 }
 


        


More information about the llvm-commits mailing list