[llvm] ccf1a5f - [InstCombine] dropRedundantMaskingOfLeftShiftInput(): truncation (PR42563)

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 5 01:41:49 PST 2019


Author: Roman Lebedev
Date: 2019-11-05T12:41:26+03:00
New Revision: ccf1a5f4bbe680f20e26c29774d62bec6cb226da

URL: https://github.com/llvm/llvm-project/commit/ccf1a5f4bbe680f20e26c29774d62bec6cb226da
DIFF: https://github.com/llvm/llvm-project/commit/ccf1a5f4bbe680f20e26c29774d62bec6cb226da.diff

LOG: [InstCombine] dropRedundantMaskingOfLeftShiftInput(): truncation (PR42563)

Summary:
That fold keeps growing and growing :(
I think this may be one of the last pieces for it.

Since D67677/D67725, the fold knows the general form
of the pattern - where some masking is needed:
https://rise4fun.com/Alive/F5R
https://rise4fun.com/Alive/gslRa

But there is one more huge piece missing - if you are extracting some bits,
it is entirely possible that the source is wider than the extraction,
i.e. there may be a truncation. And we don't deal with that yet.

But we can, and the generalization remains fully identical:
https://rise4fun.com/Alive/Uar
https://rise4fun.com/Alive/5SW
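
For example, in pattern a with a truncation, where some masking is still
needed (see the `t0_basic` test in the partially-redundant variant-a file
below), we used to be stuck with:

  %t5 = and i64 %t3, %x           ; mask applied in the wide i64 type
  %t6 = trunc i64 %t5 to i32
  %t7 = shl i32 %t6, %t4

and now produce:

  %tmp1 = trunc i64 %x to i32     ; truncate the input first
  %tmp2 = shl i32 %tmp1, %t4
  %t7 = and i32 %tmp2, 2147483647 ; mask re-applied in the narrow i32 type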

After a preparatory cleanup, I think the diff looks rather clean.

One missing piece remains: in some patterns (especially pattern b),
`-1` only needs to be `-1` in the final type, but that is for later.
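
As a purely hypothetical sketch of that still-missing case (these names and
constants are illustrative, not from the tests below): in pattern b the mask
is `~(-1 << %nbits)`, computed in the wide type, and a shifted constant of
`4294967295` is not all-ones in i64, yet it truncates to `-1` in i32, so the
narrow result would be the same:

  %notmask = shl i64 4294967295, %nbits ; 4294967295 is not -1 in i64,
  %mask = xor i64 %notmask, -1          ; but it does truncate to i32 -1
  %masked = and i64 %mask, %x
  %trunc = trunc i64 %masked to i32     ; low 32 bits match pattern b exactly
  %r = shl i32 %trunc, %shamt

The fold currently insists on a true `-1` in the wide type, so this form is
not handled yet.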

https://bugs.llvm.org/show_bug.cgi?id=42563

Reviewers: spatel, nikic

Reviewed By: spatel

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D69125

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
    llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-a.ll
    llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll
    llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll
    llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll
    llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-e.ll
    llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-a.ll
    llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll
    llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll
    llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll
    llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-e.ll
    llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-f.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index d28601ff9e29..eea890c815ca 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -162,10 +162,20 @@ dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
          "The input must be 'shl'!");
 
   Value *Masked, *ShiftShAmt;
-  match(OuterShift, m_Shift(m_Value(Masked), m_Value(ShiftShAmt)));
+  match(OuterShift,
+        m_Shift(m_Value(Masked), m_ZExtOrSelf(m_Value(ShiftShAmt))));
+
+  // *If* there is a truncation between the outer shift and the possible mask,
+  // then said truncation *must* be one-use, else we can't perform the fold.
+  Value *Trunc;
+  if (match(Masked, m_CombineAnd(m_Trunc(m_Value(Masked)), m_Value(Trunc))) &&
+      !Trunc->hasOneUse())
+    return nullptr;
 
   Type *NarrowestTy = OuterShift->getType();
   Type *WidestTy = Masked->getType();
+  bool HadTrunc = WidestTy != NarrowestTy;
+
   // The mask must be computed in a type twice as wide to ensure
   // that no bits are lost if the sum-of-shifts is wider than the base type.
   Type *ExtendedTy = WidestTy->getExtendedType();
@@ -186,6 +196,14 @@ dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
   Constant *NewMask;
 
   if (match(Masked, m_c_And(m_CombineOr(MaskA, MaskB), m_Value(X)))) {
+    // Peek through an optional zext of the shift amount.
+    match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
+
+    // We have two shift amounts from two different shifts. The types of those
+    // shift amounts may not match. If that's the case let's bail out now.
+    if (MaskShAmt->getType() != ShiftShAmt->getType())
+      return nullptr;
+
     // Can we simplify (MaskShAmt+ShiftShAmt) ?
     auto *SumOfShAmts = dyn_cast_or_null<Constant>(SimplifyAddInst(
         MaskShAmt, ShiftShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
@@ -210,6 +228,14 @@ dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
   } else if (match(Masked, m_c_And(m_CombineOr(MaskC, MaskD), m_Value(X))) ||
              match(Masked, m_Shr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
                                  m_Deferred(MaskShAmt)))) {
+    // Peek through an optional zext of the shift amount.
+    match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
+
+    // We have two shift amounts from two different shifts. The types of those
+    // shift amounts may not match. If that's the case let's bail out now.
+    if (MaskShAmt->getType() != ShiftShAmt->getType())
+      return nullptr;
+
     // Can we simplify (ShiftShAmt-MaskShAmt) ?
     auto *ShAmtsDiff = dyn_cast_or_null<Constant>(SimplifySubInst(
         ShiftShAmt, MaskShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
@@ -254,10 +280,15 @@ dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
       return nullptr;
   }
 
+  // If we need to apply truncation, let's do it first, since we can.
+  // We have already ensured that the old truncation will go away.
+  if (HadTrunc)
+    X = Builder.CreateTrunc(X, NarrowestTy);
+
   // No 'NUW'/'NSW'! We no longer know that we won't shift-out non-0 bits.
+  // We didn't change the Type of this outermost shift, so we can just do it.
   auto *NewShift = BinaryOperator::Create(OuterShift->getOpcode(), X,
                                           OuterShift->getOperand(1));
-
   if (!NeedMask)
     return NewShift;
 

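To illustrate the shape the updated matcher now handles, here is a minimal
sketch (the value names are illustrative, not from the tests; the shape
mirrors the truncation test files below). The mask and its shift amount live
in the wide i64 type, the left-shift lives in the narrow i32 type, and
peeling the `zext` lets both shift amounts be compared as i32:

  %wideamt = zext i32 %nbits to i64
  %mask = lshr i64 -1, %wideamt         ; mask computed in the wide type
  %masked = and i64 %mask, %x
  %narrowed = trunc i64 %masked to i32  ; must be the truncation's only use
  %r = shl i32 %narrowed, %nbits        ; both shift amounts trace to %nbits

Here `SimplifySubInst(%nbits, %nbits)` folds to zero, the mask is provably
redundant, and the whole chain becomes `shl i32 (trunc i64 %x to i32), %nbits`.
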
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-a.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-a.ll
index bfe563d46360..bc84e7a61afc 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-a.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-a.ll
@@ -26,9 +26,9 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T2]])
 ; CHECK-NEXT:    call void @use64(i64 [[T3]])
 ; CHECK-NEXT:    call void @use32(i32 [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
-; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
-; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], [[T4]]
+; CHECK-NEXT:    [[T7:%.*]] = and i32 [[TMP2]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T7]]
 ;
   %t0 = add i32 %nbits, -1
@@ -66,9 +66,9 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
-; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
-; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T4]]
+; CHECK-NEXT:    [[T7:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T7]]
 ;
   %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -101,9 +101,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
-; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
-; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T4]]
+; CHECK-NEXT:    [[T7:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 undef, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T7]]
 ;
   %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>
@@ -136,9 +136,9 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
-; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
-; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T4]]
+; CHECK-NEXT:    [[T7:%.*]] = and <8 x i32> [[TMP2]], <i32 undef, i32 0, i32 1, i32 2147483647, i32 -1, i32 -1, i32 -1, i32 -1>
 ; CHECK-NEXT:    ret <8 x i32> [[T7]]
 ;
   %t0 = add <8 x i32> %nbits, <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>

diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll
index b1d0aa029ed9..65bc2f244e0a 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll
@@ -26,9 +26,9 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T2]])
 ; CHECK-NEXT:    call void @use64(i64 [[T3]])
 ; CHECK-NEXT:    call void @use32(i32 [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
-; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
-; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], [[T4]]
+; CHECK-NEXT:    [[T7:%.*]] = and i32 [[TMP2]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T7]]
 ;
   %t0 = add i32 %nbits, -1
@@ -66,9 +66,9 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
-; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
-; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T4]]
+; CHECK-NEXT:    [[T7:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T7]]
 ;
   %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -101,9 +101,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
-; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
-; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T4]]
+; CHECK-NEXT:    [[T7:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 undef, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T7]]
 ;
   %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>
@@ -136,9 +136,9 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
-; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
-; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T4]]
+; CHECK-NEXT:    [[T7:%.*]] = and <8 x i32> [[TMP2]], <i32 undef, i32 0, i32 1, i32 2147483647, i32 -1, i32 -1, i32 -1, i32 -1>
 ; CHECK-NEXT:    ret <8 x i32> [[T7]]
 ;
   %t0 = add <8 x i32> %nbits, <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>

diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll
index dcc789b66551..9203550b7db3 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll
@@ -22,9 +22,9 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T0]])
 ; CHECK-NEXT:    call void @use64(i64 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
-; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], [[T2]]
+; CHECK-NEXT:    [[T5:%.*]] = and i32 [[TMP2]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T5]]
 ;
   %t0 = zext i32 %nbits to i64
@@ -54,9 +54,9 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -81,9 +81,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 undef, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -108,9 +108,9 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i32> [[TMP2]], <i32 undef, i32 1, i32 2147483647, i32 -1, i32 -1, i32 -1, i32 undef, i32 undef>
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>

diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll
index 1f95f92ac2d8..5c08183b999a 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll
@@ -24,9 +24,9 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T1]])
 ; CHECK-NEXT:    call void @use64(i64 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
-; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
-; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], [[T3]]
+; CHECK-NEXT:    [[T6:%.*]] = and i32 [[TMP2]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T6]]
 ;
   %t0 = zext i32 %nbits to i64
@@ -60,9 +60,9 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
+; CHECK-NEXT:    [[T6:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -91,9 +91,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
+; CHECK-NEXT:    [[T6:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 undef, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -122,9 +122,9 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
+; CHECK-NEXT:    [[T6:%.*]] = and <8 x i32> [[TMP2]], <i32 undef, i32 1, i32 2147483647, i32 -1, i32 -1, i32 -1, i32 undef, i32 undef>
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>

diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-e.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-e.ll
index a9b3b766ac69..1bfd95587e93 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-e.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-e.ll
@@ -22,9 +22,9 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T0]])
 ; CHECK-NEXT:    call void @use64(i64 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[T1]], [[T0]]
-; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
-; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], [[T2]]
+; CHECK-NEXT:    [[T5:%.*]] = and i32 [[TMP2]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T5]]
 ;
   %t0 = zext i32 %nbits to i64
@@ -54,9 +54,9 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -81,9 +81,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 undef, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -108,9 +108,9 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i32> [[TMP2]], <i32 undef, i32 1, i32 2147483647, i32 -1, i32 -1, i32 -1, i32 undef, i32 undef>
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>

diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-a.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-a.ll
index 596ae1c6b1f7..d11a5c322176 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-a.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-a.ll
@@ -27,8 +27,8 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
 ; CHECK-NEXT:    call void @use64(i64 [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
-; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret i32 [[T6]]
 ;
   %t0 = zext i32 %nbits to i64
@@ -67,8 +67,8 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -102,8 +102,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -137,8 +137,8 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>

diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll
index 048db9a7fdd6..960f2c9e8aba 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll
@@ -27,8 +27,8 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
 ; CHECK-NEXT:    call void @use64(i64 [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
-; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret i32 [[T6]]
 ;
   %t0 = zext i32 %nbits to i64
@@ -67,8 +67,8 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -102,8 +102,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -138,8 +138,8 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T5]])
-; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
-; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[TMP1]], [[T4]]
 ; CHECK-NEXT:    ret <8 x i32> [[T7]]
 ;
   %t0 = add <8 x i32> %nbits, <i32 -1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0>

diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll
index 67849aa2f56c..3cc3e1028ae1 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll
@@ -24,8 +24,8 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use64(i64 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
-; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret i32 [[T5]]
 ;
   %t0 = zext i32 %nbits to i64
@@ -58,8 +58,8 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -87,8 +87,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -116,8 +116,8 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>

diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll
index eb8ca3926325..04a0966f1482 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll
@@ -26,8 +26,8 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
 ; CHECK-NEXT:    call void @use64(i64 [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
-; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret i32 [[T6]]
 ;
   %t0 = zext i32 %nbits to i64
@@ -64,8 +64,8 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -97,8 +97,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -130,8 +130,8 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
-; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
 ; CHECK-NEXT:    ret <8 x i32> [[T6]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>

diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-e.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-e.ll
index 8db0e36fa764..64e7c6bb670c 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-e.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-e.ll
@@ -24,8 +24,8 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use64(i64 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
-; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret i32 [[T5]]
 ;
   %t0 = zext i32 %nbits to i64
@@ -58,8 +58,8 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -87,8 +87,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -116,8 +116,8 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>

diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-f.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-f.ll
index e692b188e2a8..aaaeb43fd496 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-f.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-f.ll
@@ -24,8 +24,8 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-NEXT:    call void @use64(i64 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use64(i64 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
-; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret i32 [[T5]]
 ;
   %t0 = zext i32 %nbits to i64
@@ -58,8 +58,8 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -87,8 +87,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>
@@ -116,8 +116,8 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
-; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
 ; CHECK-NEXT:    ret <8 x i32> [[T5]]
 ;
   %t0 = zext <8 x i32> %nbits to <8 x i64>

