[llvm] 341443d - [InstCombine] Fold (-a >> b) and/or/xor (~a >> b) into (-a and/or/xor ~a) >> b

Maksim Kita via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 21 02:50:08 PDT 2023


Author: Maksim Kita
Date: 2023-08-21T12:49:20+03:00
New Revision: 341443d731af67fbcc9418a3b5f8388a6325324e

URL: https://github.com/llvm/llvm-project/commit/341443d731af67fbcc9418a3b5f8388a6325324e
DIFF: https://github.com/llvm/llvm-project/commit/341443d731af67fbcc9418a3b5f8388a6325324e.diff

LOG: [InstCombine] Fold (-a >> b) and/or/xor (~a >> b) into (-a and/or/xor ~a) >> b

Fold (-a >> b) and/or/xor (~a >> b) into (-a and/or/xor ~a) >> b.
Depends on D157289.

Differential Revision: https://reviews.llvm.org/D157290
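
For illustration (editor's sketch, not part of the commit log): the `and` case of the new fold, mirroring the updated and_ashr_not test below. Both operands are shifted by the same amount, so the `not` and the `and` can be applied first and a single ashr remains; the function names @src and @tgt are placeholders.

    ; Before the fold: two ashr instructions feed the bitwise op.
    define i8 @src(i8 %x, i8 %y, i8 %shamt) {
      %x.shift = ashr i8 %x, %shamt
      %y.shift = ashr i8 %y, %shamt
      %y.shift.not = xor i8 %y.shift, -1   ; ~(y >> shamt)
      %and = and i8 %x.shift, %y.shift.not
      ret i8 %and
    }

    ; After the fold (plus canonicalization): only one ashr is left,
    ; since for an arithmetic shift ~(y >> shamt) == (~y) >> shamt.
    define i8 @tgt(i8 %x, i8 %y, i8 %shamt) {
      %y.not = xor i8 %y, -1
      %and = and i8 %y.not, %x
      %res = ashr i8 %and, %shamt
      ret i8 %res
    }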

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
    llvm/test/Transforms/InstCombine/binop-and-shifts.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 09baa695096466..7acee644dfc75f 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -751,6 +751,14 @@ static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
 //    2) BinOp1 == BinOp2 (if BinOp ==  `add`, then also requires `shl`).
 //
 //    -> (BinOp (logic_shift (BinOp X, Y)), Mask)
+//
+// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
+//   IFF
+//   1) Binop1 is bitwise logical operator `and`, `or` or `xor`
+//   2) Binop2 is `not`
+//
+//   -> (arithmetic_shift Binop1((not X), Y), Amt)
+
 Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
   auto IsValidBinOpc = [](unsigned Opc) {
     switch (Opc) {
@@ -770,11 +778,13 @@ Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
   // constraints.
   auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
                                       unsigned ShOpc) {
+    assert(ShOpc != Instruction::AShr);
     return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
            ShOpc == Instruction::Shl;
   };
 
   auto GetInvShift = [](unsigned ShOpc) {
+    assert(ShOpc != Instruction::AShr);
     return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
   };
 
@@ -807,14 +817,13 @@ Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
     Constant *CMask, *CShift;
     Value *X, *Y, *ShiftedX, *Mask, *Shift;
     if (!match(I.getOperand(ShOpnum),
-               m_OneUse(m_LogicalShift(m_Value(Y), m_Value(Shift)))))
+               m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
       return nullptr;
     if (!match(I.getOperand(1 - ShOpnum),
                m_BinOp(m_Value(ShiftedX), m_Value(Mask))))
       return nullptr;
 
-    if (!match(ShiftedX,
-               m_OneUse(m_LogicalShift(m_Value(X), m_Specific(Shift)))))
+    if (!match(ShiftedX, m_OneUse(m_Shift(m_Value(X), m_Specific(Shift)))))
       return nullptr;
 
     // Make sure we are matching instruction shifts and not ConstantExpr
@@ -838,6 +847,18 @@ Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
     if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
       return nullptr;
 
+    if (ShOpc == Instruction::AShr) {
+      if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
+          BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
+        Value *NotX = Builder.CreateNot(X);
+        Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
+        return BinaryOperator::Create(
+            static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
+      }
+
+      return nullptr;
+    }
+
     // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
     // distribute to drop the shift irrelevant of constants.
     if (BinOpc == I.getOpcode() &&

diff --git a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll
index d2fa11b0d433c7..45fd87be3c3318 100644
--- a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll
+++ b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll
@@ -554,10 +554,9 @@ define i8 @shl_add_and_fail_mismatch_shift(i8 %x, i8 %y) {
 
 define i8 @and_ashr_not(i8 %x, i8 %y, i8 %shamt) {
 ; CHECK-LABEL: @and_ashr_not(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor i8 [[Y_SHIFT]], -1
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret i8 [[AND]]
 ;
   %x.shift = ashr i8 %x, %shamt
@@ -569,10 +568,9 @@ define i8 @and_ashr_not(i8 %x, i8 %y, i8 %shamt) {
 
 define i8 @and_ashr_not_commuted(i8 %x, i8 %y, i8 %shamt) {
 ; CHECK-LABEL: @and_ashr_not_commuted(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor i8 [[Y_SHIFT]], -1
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret i8 [[AND]]
 ;
   %x.shift = ashr i8 %x, %shamt
@@ -635,10 +633,9 @@ define i8 @and_ashr_not_fail_invalid_xor_constant(i8 %x, i8 %y, i8 %shamt) {
 
 define <4 x i8> @and_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 ; CHECK-LABEL: @and_ashr_not_vec(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr <4 x i8> [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr <4 x i8> [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor <4 x i8> [[Y_SHIFT]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT:    [[AND:%.*]] = and <4 x i8> [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
+; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret <4 x i8> [[AND]]
 ;
   %x.shift = ashr <4 x i8> %x, %shamt
@@ -650,10 +647,9 @@ define <4 x i8> @and_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 
 define <4 x i8> @and_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 ; CHECK-LABEL: @and_ashr_not_vec_commuted(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr <4 x i8> [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr <4 x i8> [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor <4 x i8> [[Y_SHIFT]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT:    [[AND:%.*]] = and <4 x i8> [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
+; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret <4 x i8> [[AND]]
 ;
   %x.shift = ashr <4 x i8> %x, %shamt
@@ -665,10 +661,9 @@ define <4 x i8> @and_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %s
 
 define <4 x i8> @and_ashr_not_vec_undef_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 ; CHECK-LABEL: @and_ashr_not_vec_undef_1(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr <4 x i8> [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr <4 x i8> [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor <4 x i8> [[Y_SHIFT]], <i8 -1, i8 undef, i8 undef, i8 undef>
-; CHECK-NEXT:    [[AND:%.*]] = and <4 x i8> [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
+; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret <4 x i8> [[AND]]
 ;
   %x.shift = ashr <4 x i8> %x, %shamt
@@ -693,10 +688,9 @@ define <4 x i8> @and_ashr_not_vec_undef_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %sh
 
 define i8 @or_ashr_not(i8 %x, i8 %y, i8 %shamt) {
 ; CHECK-LABEL: @or_ashr_not(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor i8 [[Y_SHIFT]], -1
-; CHECK-NEXT:    [[OR:%.*]] = or i8 [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[OR:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret i8 [[OR]]
 ;
   %x.shift = ashr i8 %x, %shamt
@@ -708,10 +702,9 @@ define i8 @or_ashr_not(i8 %x, i8 %y, i8 %shamt) {
 
 define i8 @or_ashr_not_commuted(i8 %x, i8 %y, i8 %shamt) {
 ; CHECK-LABEL: @or_ashr_not_commuted(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor i8 [[Y_SHIFT]], -1
-; CHECK-NEXT:    [[OR:%.*]] = or i8 [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[OR:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret i8 [[OR]]
 ;
   %x.shift = ashr i8 %x, %shamt
@@ -774,10 +767,9 @@ define i8 @or_ashr_not_fail_invalid_xor_constant(i8 %x, i8 %y, i8 %shamt) {
 
 define <4 x i8> @or_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 ; CHECK-LABEL: @or_ashr_not_vec(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr <4 x i8> [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr <4 x i8> [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor <4 x i8> [[Y_SHIFT]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT:    [[OR:%.*]] = or <4 x i8> [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
+; CHECK-NEXT:    [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret <4 x i8> [[OR]]
 ;
   %x.shift = ashr <4 x i8> %x, %shamt
@@ -789,10 +781,9 @@ define <4 x i8> @or_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 
 define <4 x i8> @or_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 ; CHECK-LABEL: @or_ashr_not_vec_commuted(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr <4 x i8> [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr <4 x i8> [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor <4 x i8> [[Y_SHIFT]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT:    [[OR:%.*]] = or <4 x i8> [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
+; CHECK-NEXT:    [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret <4 x i8> [[OR]]
 ;
   %x.shift = ashr <4 x i8> %x, %shamt
@@ -804,10 +795,9 @@ define <4 x i8> @or_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %sh
 
 define <4 x i8> @or_ashr_not_vec_undef_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 ; CHECK-LABEL: @or_ashr_not_vec_undef_1(
-; CHECK-NEXT:    [[X_SHIFT:%.*]] = ashr <4 x i8> [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[Y_SHIFT:%.*]] = ashr <4 x i8> [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT:    [[Y_SHIFT_NOT:%.*]] = xor <4 x i8> [[Y_SHIFT]], <i8 -1, i8 undef, i8 undef, i8 undef>
-; CHECK-NEXT:    [[OR:%.*]] = or <4 x i8> [[X_SHIFT]], [[Y_SHIFT_NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
+; CHECK-NEXT:    [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
 ; CHECK-NEXT:    ret <4 x i8> [[OR]]
 ;
   %x.shift = ashr <4 x i8> %x, %shamt
@@ -832,9 +822,9 @@ define <4 x i8> @or_ashr_not_vec_undef_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %sha
 
 define i8 @xor_ashr_not(i8 %x, i8 %y, i8 %shamt) {
 ; CHECK-LABEL: @xor_ashr_not(
-; CHECK-NEXT:    [[Y_SHIFT1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr i8 [[Y_SHIFT1]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[TMP1]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[DOTNOT:%.*]] = ashr i8 [[TMP1]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[DOTNOT]], -1
 ; CHECK-NEXT:    ret i8 [[XOR]]
 ;
   %x.shift = ashr i8 %x, %shamt
@@ -846,9 +836,9 @@ define i8 @xor_ashr_not(i8 %x, i8 %y, i8 %shamt) {
 
 define i8 @xor_ashr_not_commuted(i8 %x, i8 %y, i8 %shamt) {
 ; CHECK-LABEL: @xor_ashr_not_commuted(
-; CHECK-NEXT:    [[Y_SHIFT1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr i8 [[Y_SHIFT1]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[TMP1]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[DOTNOT:%.*]] = ashr i8 [[TMP1]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[DOTNOT]], -1
 ; CHECK-NEXT:    ret i8 [[XOR]]
 ;
   %x.shift = ashr i8 %x, %shamt
@@ -910,9 +900,9 @@ define i8 @xor_ashr_not_fail_invalid_xor_constant(i8 %x, i8 %y, i8 %shamt) {
 
 define <4 x i8> @xor_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 ; CHECK-LABEL: @xor_ashr_not_vec(
-; CHECK-NEXT:    [[Y_SHIFT1:%.*]] = xor <4 x i8> [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i8> [[Y_SHIFT1]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1>
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[DOTNOT:%.*]] = ashr <4 x i8> [[TMP1]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i8> [[DOTNOT]], <i8 -1, i8 -1, i8 -1, i8 -1>
 ; CHECK-NEXT:    ret <4 x i8> [[XOR]]
 ;
   %x.shift = ashr <4 x i8> %x, %shamt
@@ -924,9 +914,9 @@ define <4 x i8> @xor_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 
 define <4 x i8> @xor_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 ; CHECK-LABEL: @xor_ashr_not_vec_commuted(
-; CHECK-NEXT:    [[Y_SHIFT1:%.*]] = xor <4 x i8> [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i8> [[Y_SHIFT1]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1>
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[DOTNOT:%.*]] = ashr <4 x i8> [[TMP1]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i8> [[DOTNOT]], <i8 -1, i8 -1, i8 -1, i8 -1>
 ; CHECK-NEXT:    ret <4 x i8> [[XOR]]
 ;
   %x.shift = ashr <4 x i8> %x, %shamt
@@ -938,9 +928,9 @@ define <4 x i8> @xor_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %s
 
 define <4 x i8> @xor_ashr_not_vec_undef_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
 ; CHECK-LABEL: @xor_ashr_not_vec_undef_1(
-; CHECK-NEXT:    [[Y_SHIFT1:%.*]] = xor <4 x i8> [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i8> [[Y_SHIFT1]], [[SHAMT:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i8> [[TMP1]], <i8 -1, i8 undef, i8 undef, i8 undef>
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[DOTNOT:%.*]] = ashr <4 x i8> [[TMP1]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i8> [[DOTNOT]], <i8 -1, i8 -1, i8 -1, i8 -1>
 ; CHECK-NEXT:    ret <4 x i8> [[XOR]]
 ;
   %x.shift = ashr <4 x i8> %x, %shamt
